Merge tag 'wireless-drivers-next-for-davem-2018-08-05' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says:

====================
wireless-drivers-next patches for 4.19

This time a bigger pull request as we have two new Mediatek drivers,
MT76x2u (CONFIG_MT76x2U) and MT76x0U (CONFIG_MT76x0U). Also iwlwifi got
support for the new IEEE 802.11ax standard, the successor to 802.11ac.
And naturally smaller new features and bugfixes all over.

Major changes:

wcn36xx

* fix WEP in client mode

wil6210

* add support for Talyn-MB (Talyn ver 2.0) device

* add support for enhanced DMA firmware feature

iwlwifi

* implement the 802.11ax D2.0 draft

* support for the new 22560 device family

* new PCI IDs for 22000 and 22560

qtnfmac

* implement cfg80211 power management callback

* enable multiple SSIDs scan support

* implement basic WoWLAN support

mt7601u

* fall back to software encryption for ciphers the hardware does not support

* enable 802.11 Management Frame Protection (MFP)

mt76

* support setting RTS threshold

* add USB support

* add support for MT76x2u devices

* add support for MT76x0U devices

mwifiex

* allow user space to set all IEs except the WMM IE

rsi

* add firmware support for AP+BT dual mode
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c (new file, 207 lines added)
@@ -0,0 +1,207 @@
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"

int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info_gen3 *ctxt_info_gen3;
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	void *iml_img;
	u32 control_flags = 0;
	int ret;

	/* Allocate prph scratch */
	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
					  &trans_pcie->prph_scratch_dma_addr,
					  GFP_KERNEL);
	if (!prph_scratch)
		return -ENOMEM;

	prph_sc_ctrl = &prph_scratch->ctrl_cfg;

	prph_sc_ctrl->version.version = 0;
	prph_sc_ctrl->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);

	control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
			IWL_PRPH_SCRATCH_MTR_MODE |
			(IWL_PRPH_MTR_FORMAT_256B &
			 IWL_PRPH_SCRATCH_MTR_FORMAT) |
			IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
			IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
		cpu_to_le64(trans_pcie->rxq->bd_dma);

	/* Configure debug, for integration */
	iwl_pcie_alloc_fw_monitor(trans, 0);
	prph_sc_ctrl->hwm_cfg.hwm_base_addr =
		cpu_to_le64(trans_pcie->fw_mon_phys);
	prph_sc_ctrl->hwm_cfg.hwm_size =
		cpu_to_le32(trans_pcie->fw_mon_size);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
	if (ret) {
		dma_free_coherent(trans->dev,
				  sizeof(*prph_scratch),
				  prph_scratch,
				  trans_pcie->prph_scratch_dma_addr);
		return ret;
	}

	/* Allocate prph information
	 * currently we don't assign anything to the prph info; it will be
	 * assigned later */
	prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info)
		return -ENOMEM;

	/* Allocate context info */
	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
					    sizeof(*ctxt_info_gen3),
					    &trans_pcie->ctxt_info_dma_addr,
					    GFP_KERNEL);
	if (!ctxt_info_gen3)
		return -ENOMEM;

	ctxt_info_gen3->prph_info_base_addr =
		cpu_to_le64(trans_pcie->prph_info_dma_addr);
	ctxt_info_gen3->prph_scratch_base_addr =
		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
	ctxt_info_gen3->prph_scratch_size =
		cpu_to_le32(sizeof(*prph_scratch));
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
	ctxt_info_gen3->cr_idx_arr_size =
		cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
	ctxt_info_gen3->tr_idx_arr_size =
		cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
	ctxt_info_gen3->mtr_base_addr =
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
	ctxt_info_gen3->mcr_base_addr =
		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	ctxt_info_gen3->mtr_size =
		cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
	ctxt_info_gen3->mcr_size =
		cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));

	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
	trans_pcie->prph_info = prph_info;
	trans_pcie->prph_scratch = prph_scratch;

	/* Allocate IML */
	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
	if (!iml_img)
		return -ENOMEM;

	memcpy(iml_img, trans->iml, trans->iml_len);

	iwl_enable_interrupts(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
		    trans_pcie->ctxt_info_dma_addr);
	iwl_write64(trans, CSR_IML_DATA_ADDR,
		    trans_pcie->iml_dma_addr);
	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);

	return 0;
}

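/*
 * Illustrative note (not part of this patch): once the context info and
 * IML addresses are programmed, CSR_AUTO_FUNC_BOOT_ENA plus
 * CSR_AUTO_FUNC_INIT trigger the firmware self load; its result comes
 * back through CSR_IML_RESP_ADDR and is checked against
 * IWL_IMAGE_RESP_FAIL in the MSIX interrupt handler hunk further below.
 */
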
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ctxt_info_gen3)
		return;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_gen3 = NULL;

	iwl_pcie_ctxt_info_free_fw_img(trans);

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
			  trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;
}
@@ -6,6 +6,7 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
#include "internal.h"
#include "iwl-prph.h"

static int iwl_pcie_get_num_sections(const struct fw_img *fw,
				     int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					const struct fw_desc *sec,
					struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
	dram->paging = NULL;
}

static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
					  const struct fw_img *fw,
					  struct iwl_context_info *ctxt_info)
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
			 const struct fw_img *fw,
			 struct iwl_context_info_dram *ctxt_dram)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
	       TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
@@ -828,19 +828,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
	{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},

	/* 22000 Series */
	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
	{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
	{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
	{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
	{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
	{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
	{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
	{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
	{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
	{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},

#endif /* CONFIG_IWLMVM */

@@ -1003,6 +1016,10 @@ static int iwl_pci_resume(struct device *device)
	if (!trans->op_mode)
		return 0;

	/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return 0;

	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
	iwl_pcie_conf_msix_hw(trans_pcie);

@@ -3,6 +3,7 @@
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
@@ -45,6 +45,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

@@ -71,6 +73,7 @@ struct iwl_host_cmd;
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @size: size used from the buffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 size;
};

/**
@@ -98,14 +102,121 @@ struct isr_statistics {
	u32 unhandled;
};

#define IWL_CD_STTS_OPTIMIZED_POS 0
#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS 4
#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0

/**
 * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
 * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
 *	In sniffer mode, when split is used, set in last CD completion. (RX)
 * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
 *	all CD completion. (RX)
 * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
 */
enum iwl_completion_desc_transfer_status {
	IWL_CD_STTS_UNUSED,
	IWL_CD_STTS_UNUSED_2,
	IWL_CD_STTS_END_TRANSFER,
	IWL_CD_STTS_OVERFLOW,
	IWL_CD_STTS_ABORTED,
	IWL_CD_STTS_ERROR,
};

/**
 * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
 * @IWL_CD_STTS_VALID: the packet is valid (RX)
 * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
 * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
 * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
 * @IWL_CD_STTS_DUP: duplicate packet (RX)
 * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
 * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
 * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
 * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
 * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
 * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
 * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
 * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
 * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
 * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
 */
enum iwl_completion_desc_wifi_status {
	IWL_CD_STTS_VALID,
	IWL_CD_STTS_FCS_ERR,
	IWL_CD_STTS_SEC_KEY_ERR,
	IWL_CD_STTS_DECRYPTION_ERR,
	IWL_CD_STTS_DUP,
	IWL_CD_STTS_ICV_MIC_ERR,
	IWL_CD_STTS_INTERNAL_SNAP_ERR,
	IWL_CD_STTS_SEC_PORT_FAIL,
	IWL_CD_STTS_BA_OLD_SN,
	IWL_CD_STTS_QOS_NULL,
	IWL_CD_STTS_MAC_HDR_ERR,
	IWL_CD_STTS_MAX_RETRANS,
	IWL_CD_STTS_EX_LIFETIME,
	IWL_CD_STTS_NOT_USED,
	IWL_CD_STTS_REPLAY_ERR,
};

#define IWL_RX_TD_TYPE_MSK 0xff000000
#define IWL_RX_TD_SIZE_MSK 0x00ffffff
#define IWL_RX_TD_SIZE_2K BIT(11)
#define IWL_RX_TD_TYPE 0

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE 0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;

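/*
 * Illustrative sketch (not part of this patch): the one-byte @status of a
 * completion descriptor packs the two sub-fields masked above; a consumer
 * could decode it with a hypothetical helper like this.
 */
static inline bool iwl_cd_frame_ok(const struct iwl_rx_completion_desc *cd)
{
	u8 transfer = (cd->status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
		      IWL_CD_STTS_TRANSFER_STATUS_POS;
	u8 wifi = (cd->status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
		  IWL_CD_STTS_WIFI_STATUS_POS;

	return transfer == IWL_CD_STTS_END_TRANSFER &&
	       wifi == IWL_CD_STTS_VALID;
}
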
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
static inline int iwl_queue_inc_wrap(int index)
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
static inline int iwl_queue_dec_wrap(int index)
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

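/*
 * Illustrative note (not part of this patch): both wrap helpers assume
 * max_tfd_queue_size is a power of two, so the AND acts as a cheap modulo;
 * e.g. with a 256-entry queue, (255 + 1) & (256 - 1) == 0 wraps back to
 * the start of the ring.
 */
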
struct iwl_cmd_meta {
@@ -314,6 +451,18 @@ enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @ctxt_info_dma_addr: dma addr of context information
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_context_info *ctxt_info;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_self_init_dram init_dram;
	struct iwl_trans *trans;

@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
 * ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS 31
#define IWL_NUM_OF_TRANSFER_RINGS 527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
	}
}

static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
	       (i >= q->read_ptr && i < q->write_ptr) :
	       !(i < q->read_ptr && i >= q->write_ptr);
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
	       (index >= r && index < w) :
	       !(index < r && index >= w);
}

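/*
 * Illustrative example (not part of this patch) for iwl_queue_used() above:
 * with n_window = 64, read_ptr = 250 and write_ptr = 260 map to r = 58 and
 * w = 4, i.e. the window wrapped; entry 255 maps to index 63 and is "used",
 * while entry 300 maps to index 44 and is free, since only 58..63 and 0..3
 * are in flight.
 */
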
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
@@ -18,8 +18,7 @@
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 * this program.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
@@ -37,6 +36,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->mq_rx_supported) {
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		bd[rxq->write].type_n_size =
			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
				    ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
		/* 12 first bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
	}
}

static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;
@@ -608,14 +634,153 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}
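
/*
 * Illustrative note (not part of this patch): iwl_pcie_free_bd_size()
 * resolves to 16 bytes (struct iwl_rx_transfer_desc) on 22560+, 8 bytes
 * (__le64) on other multi-queue devices and 4 bytes (__le32) on
 * single-queue devices.
 */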

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  use_rx_td ? sizeof(__le16) :
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_zalloc_coherent(dev,
				      free_size * rxq->queue_size,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_zalloc_coherent(dev,
						   (use_rx_td ?
						    sizeof(*rxq->cd) :
						    sizeof(__le32)) *
						   rxq->queue_size,
						   &rxq->used_bd_dma,
						   GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
					   sizeof(__le16) :
					   sizeof(struct iwl_rb_status),
					   &rxq->rb_stts_dma,
					   GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->tr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->cr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;
	/*
	 * W/A for a 22560 device step Z0 bug: the value must be non-zero.
	 * TODO: remove this when we stop supporting Z0.
	 */
	*rxq->cr_tail = cpu_to_le16(500);

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
		sizeof(__le32);
	int i, ret;

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;
@@ -630,65 +795,11 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					      free_size * rxq->queue_size,
					      &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/*Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			return ret;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
	iwl_pcie_enable_rx_wake(trans, true);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
	rxq->used_count = 0;
}

static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
		sizeof(__le32);
	int i;

	/*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	if (!trans->cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
	else
		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;

	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	if (rxb->invalid)
		goto out_err;

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;

	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}
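
/*
 * Illustrative note (not part of this patch): the 0x0FFF mask above caps
 * the vid at 4095; the vid is 1-based, which is why a zero vid is rejected
 * and the lookup uses global_table[vid - 1].
 */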

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
@@ -1250,7 +1385,7 @@ restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ restart:
		if (unlikely(rxq->used_count == rxq->queue_size / 2))
			emergency = true;

		if (trans->cfg->mq_rx_supported) {
			/*
			 * used_bd is a 32 bit but only 12 are used to retrieve
			 * the vid
			 */
			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;

			if (WARN(!vid ||
				 vid > ARRAY_SIZE(trans_pcie->global_table),
				 "Invalid rxb index from HW %u\n", (u32)vid)) {
				iwl_force_nmi(trans);
				goto out;
			}
			rxb = trans_pcie->global_table[vid - 1];
			if (WARN(rxb->invalid,
				 "Invalid rxb from HW %u\n", (u32)vid)) {
				iwl_force_nmi(trans);
				goto out;
			}
			rxb->invalid = true;
		} else {
			rxb = rxq->queue[i];
			rxq->queue[i] = NULL;
		}
		rxb = iwl_pcie_get_rxb(trans, rxq, i);
		if (!rxb)
			goto out;

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ restart:
out:
	/* Backtrack one entry */
	rxq->read = i;
	/* update cr tail with the rxq read pointer */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C) register, meaning that it's being clear
	 * by writing 1 to the bit.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

/*
 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
		}
	}

	/* uCode wakes up after power-down sleep */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		/* uCode wakes up after power-down sleep */
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptrs(trans);
		iwl_pcie_txq_check_wrptrs(trans);
@@ -53,6 +53,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"

/*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
	}

	iwl_pcie_ctxt_info_free_paging(trans);
	iwl_pcie_ctxt_info_free(trans);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
		iwl_pcie_ctxt_info_gen3_free(trans);
	else
		iwl_pcie_ctxt_info_free(trans);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
		goto out;
	}

	ret = iwl_pcie_ctxt_info_init(trans, fw);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
		ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
	else
		ret = iwl_pcie_ctxt_info_init(trans, fw);
	if (ret)
		goto out;

@@ -84,6 +84,7 @@
|
||||
#include "iwl-scd.h"
|
||||
#include "iwl-agn-hw.h"
|
||||
#include "fw/error-dump.h"
|
||||
#include "fw/dbg.h"
|
||||
#include "internal.h"
|
||||
#include "iwl-fh.h"
|
||||
|
||||
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
|
||||
trans_pcie->fw_mon_size = 0;
|
||||
}
|
||||
|
||||
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
|
||||
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct page *page = NULL;
|
||||
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
 	{MSIX_HW_INT_CAUSES_REG_HAP,	CSR_MSIX_HW_INT_MASK_AD, 0x2E},
 };
 
+static struct iwl_causes_list causes_list_v2[] = {
+	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
+	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
+	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
+	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
+	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
+	{MSIX_HW_INT_CAUSES_REG_IPC,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
+	{MSIX_HW_INT_CAUSES_REG_SW_ERR_V2,	CSR_MSIX_HW_INT_MASK_AD, 0x15},
+	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
+	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
+	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
+	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
 static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
-	int i;
+	int i, arr_size =
+		(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+		ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
 
 	/*
 	 * Access all non RX causes and map them to the default irq.
 	 * In case we are missing at least one interrupt vector,
 	 * the first interrupt vector will serve non-RX and FBQ causes.
 	 */
-	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
-		iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
-		iwl_clear_bit(trans, causes_list[i].mask_reg,
-			      causes_list[i].cause_num);
+	for (i = 0; i < arr_size; i++) {
+		struct iwl_causes_list *causes =
+			(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+			causes_list : causes_list_v2;
+
+		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+		iwl_clear_bit(trans, causes[i].mask_reg,
+			      causes[i].cause_num);
 	}
 }
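
The committed loop re-derives the table pointer on every iteration; an equivalent form hoists the selection out of the loop. A minimal sketch of that equivalent shape (illustrative, not the committed code):

	/* Illustrative, equivalent form of the mapping loop above. */
	static void iwl_pcie_map_non_rx_causes_sketch(struct iwl_trans *trans)
	{
		struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
		int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
		bool v2 = trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560;
		struct iwl_causes_list *causes = v2 ? causes_list_v2 : causes_list;
		int i, arr_size = v2 ? ARRAY_SIZE(causes_list_v2) :
				       ARRAY_SIZE(causes_list);

		/* Route every non-RX cause to the default MSI-X vector
		 * (IVAR write) and unmask it; RX causes are mapped separately. */
		for (i = 0; i < arr_size; i++) {
			iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
			iwl_clear_bit(trans, causes[i].mask_reg,
				      causes[i].cause_num);
		}
	}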
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 	iwl_pcie_enable_rx_wake(trans, true);
 
-	/*
-	 * Reconfigure IVAR table in case of MSIX or reset ict table in
-	 * MSI mode since HW reset erased it.
-	 * Also enables interrupts - none will happen as
-	 * the device doesn't know we're waking it up, only when
-	 * the opmode actually tells it after this call.
-	 */
-	iwl_pcie_conf_msix_hw(trans_pcie);
-	if (!trans_pcie->msix_enabled)
-		iwl_pcie_reset_ict(trans);
-	iwl_enable_interrupts(trans);
-
 	iwl_set_bit(trans, CSR_GP_CNTRL,
 		    BIT(trans->cfg->csr->flag_mac_access_req));
 	iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 		return ret;
 	}
 
+	/*
+	 * Reconfigure IVAR table in case of MSIX or reset ict table in
+	 * MSI mode since HW reset erased it.
+	 * Also enables interrupts - none will happen as
+	 * the device doesn't know we're waking it up, only when
+	 * the opmode actually tells it after this call.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+	if (!trans_pcie->msix_enabled)
+		iwl_pcie_reset_ict(trans);
+	iwl_enable_interrupts(trans);
+
 	iwl_pcie_set_pwr(trans, false);
 
 	if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
 	}
 }
 
-static const char *queue_name(struct device *dev,
-			      struct iwl_trans_pcie *trans_p, int i)
-{
-	if (trans_p->shared_vec_mask) {
-		int vec = trans_p->shared_vec_mask &
-			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
-		if (i == 0)
-			return DRV_NAME ": shared IRQ";
-
-		return devm_kasprintf(dev, GFP_KERNEL,
-				      DRV_NAME ": queue %d", i + vec);
-	}
-	if (i == 0)
-		return DRV_NAME ": default queue";
-
-	if (i == trans_p->alloc_vecs - 1)
-		return DRV_NAME ": exception";
-
-	return devm_kasprintf(dev, GFP_KERNEL,
-			      DRV_NAME ": queue %d", i);
-}
-
 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 				      struct iwl_trans_pcie *trans_pcie)
 {
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 		jiffies_to_msecs(txq->wd_timeout),
 		txq->read_ptr, txq->write_ptr,
 		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
-			(TFD_QUEUE_SIZE_MAX - 1),
+			(trans->cfg->base_params->max_tfd_queue_size - 1),
 		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
-			(TFD_QUEUE_SIZE_MAX - 1),
+			(trans->cfg->base_params->max_tfd_queue_size - 1),
 		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 }
 
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+				       struct iwl_trans_rxq_dma_data *data)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+		return -EINVAL;
+
+	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+	data->fr_bd_wid = 0;
+
+	return 0;
+}
+
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
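
The new callback hands the opmode the DMA addresses of a given RX queue's rings, which the enhanced-DMA firmware needs at init time. A caller-side sketch, going straight through the ops pointer that the gen2 ops-table hunk below wires up (the wrapper name an opmode would actually use may differ):

	/* Illustrative: query the transport for one RX queue's DMA data. */
	static int example_get_rxq_dma(struct iwl_trans *trans, int queue)
	{
		struct iwl_trans_rxq_dma_data data;
		int ret;

		ret = trans->ops->rxq_dma_data(trans, queue, &data);
		if (ret)
			return ret; /* bad queue index or RX not allocated */

		/* data.fr_bd_cb, data.urbd_stts_wrptr and data.ur_bd_cb now
		 * hold the free-BD ring, RB status write-back and used-BD
		 * ring addresses for this queue. */
		return 0;
	}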
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 	pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
 			 rxq->free_count);
 	if (rxq->rb_stts) {
+		u32 r =	__le16_to_cpu(iwl_get_closed_rb_stts(trans,
+							     rxq));
 		pos += scnprintf(buf + pos, bufsz - pos,
 				 "\tclosed_rb_num: %u\n",
-				 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
-				 0x0FFF);
+				 r & 0x0FFF);
 	} else {
 		pos += scnprintf(buf + pos, bufsz - pos,
 				 "\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 
 	for (i = rxq->read, j = 0;
 	     i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
 	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_fw_error_dump_txcmd *txcmd;
 	struct iwl_trans_dump_data *dump_data;
-	u32 len, num_rbs;
+	u32 len, num_rbs = 0;
 	u32 monitor_len;
 	int i, ptr;
 	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
-			!trans->cfg->mq_rx_supported;
+			!trans->cfg->mq_rx_supported &&
+			trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
 
 	/* transport dump header */
 	len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
 	}
 
 	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		if (!(trans->dbg_dump_mask &
+		      BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+			return NULL;
+
 		dump_data = vzalloc(len);
 		if (!dump_data)
 			return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
 	}
 
 	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += sizeof(*data) + IWL_CSR_TO_DUMP;
 
 	/* FH registers */
-	if (trans->cfg->gen2)
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
-	else
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+		if (trans->cfg->gen2)
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND_GEN2 -
+				FH_MEM_LOWER_BOUND_GEN2);
+		else
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND -
+				FH_MEM_LOWER_BOUND);
+	}
 
 	if (dump_rbs) {
 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 		/* RBs */
-		num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
-			  & 0x0FFF;
+		num_rbs =
+			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+			& 0x0FFF;
 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
 		len += num_rbs * (sizeof(*data) +
 				  sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
 	}
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2)
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
 			len += sizeof(*data) +
 			       sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
 
 	len = 0;
 	data = (void *)dump_data->data;
-	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
-	txcmd = (void *)data->data;
-	spin_lock_bh(&cmdq->lock);
-	ptr = cmdq->write_ptr;
-	for (i = 0; i < cmdq->n_window; i++) {
-		u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
-		u32 caplen, cmdlen;
-
-		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
-						   trans_pcie->tfd_size * ptr);
-		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
 
-		if (cmdlen) {
-			len += sizeof(*txcmd) + caplen;
-			txcmd->cmdlen = cpu_to_le32(cmdlen);
-			txcmd->caplen = cpu_to_le32(caplen);
-			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
-			txcmd = (void *)((u8 *)txcmd->data + caplen);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+		u16 tfd_size = trans_pcie->tfd_size;
+
+		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+		txcmd = (void *)data->data;
+		spin_lock_bh(&cmdq->lock);
+		ptr = cmdq->write_ptr;
+		for (i = 0; i < cmdq->n_window; i++) {
+			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+			u32 caplen, cmdlen;
+
+			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+							   cmdq->tfds +
+							   tfd_size * ptr);
+			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+			if (cmdlen) {
+				len += sizeof(*txcmd) + caplen;
+				txcmd->cmdlen = cpu_to_le32(cmdlen);
+				txcmd->caplen = cpu_to_le32(caplen);
+				memcpy(txcmd->data, cmdq->entries[idx].cmd,
+				       caplen);
+				txcmd = (void *)((u8 *)txcmd->data + caplen);
+			}
+
+			ptr = iwl_queue_dec_wrap(trans, ptr);
 		}
+		spin_unlock_bh(&cmdq->lock);
 
-		ptr = iwl_queue_dec_wrap(ptr);
+		data->len = cpu_to_le32(len);
+		len += sizeof(*data);
+		data = iwl_fw_error_next_data(data);
 	}
-	spin_unlock_bh(&cmdq->lock);
-
-	data->len = cpu_to_le32(len);
-	len += sizeof(*data);
-	data = iwl_fw_error_next_data(data);
 
-	len += iwl_trans_pcie_dump_csr(trans, &data);
-	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += iwl_trans_pcie_dump_csr(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
 	if (dump_rbs)
 		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2) {
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
 			struct iwl_fw_error_dump_paging *paging;
 			dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
 			len += sizeof(*data) + sizeof(*paging) + page_len;
 		}
 	}
 
-	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 	dump_data->len = len;
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
 	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
 	.txq_free = iwl_trans_pcie_dyn_txq_free,
 	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 #if IS_ENABLED(CONFIG_IWLMVM)
 	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
-	if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
+
+	if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+	    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
 		u32 hw_status;
 
 		hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
-		if (hw_status & UMAG_GEN_HW_IS_FPGA)
-			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
-		else
+		if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+			/*
+			 * b step fw is the same for physical card and fpga
+			 */
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+		else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+			 CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+		} else {
+			/*
+			 * a step no FPGA
+			 */
 			trans->cfg = &iwl22000_2ac_cfg_hr;
+		}
 	}
 #endif
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
  * BSD LICENSE
 *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
  *****************************************************************************/
 #include <linux/pm_runtime.h>
 #include <net/tso.h>
+#include <linux/tcp.h>
 
 #include "iwl-debug.h"
 #include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
  */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+					  struct iwl_txq *txq, u16 byte_cnt,
 					  int num_tbs)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	u8 filled_tfd_size, num_fetch_chunks;
 	u16 len = byte_cnt;
 	__le16 bc_ent;
 
-	len = DIV_ROUND_UP(len, 4);
+	if (trans_pcie->bc_table_dword)
+		len = DIV_ROUND_UP(len, 4);
 
 	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
 		return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
 	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
 	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-	scd_bc_tbl->tfd_offset[idx] = bc_ent;
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+	else
+		scd_bc_tbl->tfd_offset[idx] = bc_ent;
 }
 
 /*
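
The byte-count entry packs the (optionally dword-rounded) length into bits 0-11 and the extra fetch-chunk count into bits 12-15. A worked example with made-up numbers:

	/* Worked example of the bc_ent packing above (values illustrative):
	 * byte_cnt = 1200, bc_table_dword set, filled_tfd_size = 180. */
	u16 len = DIV_ROUND_UP(1200, 4);                 /* 300 dwords, fits 12 bits */
	u8 num_fetch_chunks = DIV_ROUND_UP(180, 64) - 1; /* 2 extra 64-byte chunks */
	__le16 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	/* 300 | (2 << 12) = 0x212C: low 12 bits carry the length,
	 * the top 4 bits carry the chunk count. */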
@@ -355,52 +365,89 @@ out_err:
 	return -EINVAL;
 }
 
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
-					    struct iwl_txq *txq,
-					    struct iwl_device_cmd *dev_cmd,
-					    struct sk_buff *skb,
-					    struct iwl_cmd_meta *out_meta)
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+					  struct iwl_txq *txq,
+					  struct iwl_device_cmd *dev_cmd,
+					  struct sk_buff *skb,
+					  struct iwl_cmd_meta *out_meta,
+					  int hdr_len,
+					  int tx_cmd_len)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 	dma_addr_t tb_phys;
-	bool amsdu;
-	int i, len, tb1_len, tb2_len, hdr_len;
+	int len;
 	void *tb1_addr;
 
-	memset(tfd, 0, sizeof(*tfd));
-
-	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
-		(*ieee80211_get_qos_ctl(hdr) &
-		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
 	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-	/* The first TB points to bi-directional DMA data */
-	if (!amsdu)
-		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
-		       IWL_FIRST_TB_SIZE);
 
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
-	/* there must be data left over for TB1 or this code must be changed */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
 	 * and the 802.11 header - dword aligned size
 	 * (This calculation modifies the TX command, so do it before the
 	 * setup of the first TB)
 	 */
-	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
-	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
 
 	/* do not align A-MSDU to dword as the subframe header aligns it */
-	if (amsdu)
-		tb1_len = len;
-	else
-		tb1_len = ALIGN(len, 4);
 
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		goto out_err;
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+				      len + IWL_FIRST_TB_SIZE,
+				      hdr_len, dev_cmd))
+		goto out_err;
+
+	/* building the A-MSDU might have changed this data, memcpy it now */
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+	return tfd;
+
+out_err:
+	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+	return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+				    struct iwl_txq *txq,
+				    struct iwl_device_cmd *dev_cmd,
+				    struct sk_buff *skb,
+				    struct iwl_cmd_meta *out_meta,
+				    int hdr_len,
+				    int tx_cmd_len)
+{
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	dma_addr_t tb_phys;
+	int i, len, tb1_len, tb2_len;
+	void *tb1_addr;
+
+	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
+	/* The first TB points to bi-directional DMA data */
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
+
+	tb1_len = ALIGN(len, 4);
+
 	/* map the data for TB1 */
 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
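
For orientation, the transfer-buffer layout both builders produce, as implied by the hunks above and below (a sketch, not authoritative for every device family):

	/*
	 * Gen2 TFD layout produced by the TX builders (illustrative):
	 *
	 *   TB0:  IWL_FIRST_TB_SIZE bytes of dev_cmd->hdr (bi-directional buf)
	 *   TB1:  remainder of the TX command + 802.11 header
	 *         (dword-aligned for plain frames, unaligned for A-MSDU,
	 *          since the subframe header provides the alignment)
	 *   TB2:  remainder of skb_headlen() past the 802.11 header
	 *   TB3+: one TB per payload page/fragment
	 */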
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 		goto out_err;
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 
-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
-	if (amsdu) {
-		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
-					      tb1_len + IWL_FIRST_TB_SIZE,
-					      hdr_len, dev_cmd))
-			goto out_err;
-
-		/*
-		 * building the A-MSDU might have changed this data, so memcpy
-		 * it now
-		 */
-		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
-		       IWL_FIRST_TB_SIZE);
-		return tfd;
-	}
-
 	/* set up TFD's third entry to point to remainder of skb's head */
 	tb2_len = skb_headlen(skb) - hdr_len;
 
@@ -467,13 +497,50 @@ out_err:
 	return NULL;
 }
 
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+					    struct iwl_txq *txq,
+					    struct iwl_device_cmd *dev_cmd,
+					    struct sk_buff *skb,
+					    struct iwl_cmd_meta *out_meta)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	int len, hdr_len;
+	bool amsdu;
+
+	/* There must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+	memset(tfd, 0, sizeof(*tfd));
+
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+		len = sizeof(struct iwl_tx_cmd_gen2);
+	else
+		len = sizeof(struct iwl_tx_cmd_gen3);
+
+	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+		(*ieee80211_get_qos_ctl(hdr) &
+		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (amsdu)
+		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+						    out_meta, hdr_len, len);
+
+	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+				      hdr_len, len);
+}
+
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			   struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	u16 cmd_len;
 	int idx;
 	void *tfd;
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
-	if (iwl_queue_space(txq) < txq->high_mark) {
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+			(void *)dev_cmd->payload;
+
+		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+	} else {
+		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+			(void *)dev_cmd->payload;
+
+		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+	}
+
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
 		iwl_stop_queue(trans, txq);
 
 		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(txq) < 3)) {
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
 			struct iwl_device_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
+	iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
 				      iwl_pcie_gen2_get_num_tbs(trans, tfd));
 
 	/* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 	/*
 	 * At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 	memset(tfd, 0, sizeof(*tfd));
 
-	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 		iwl_trans_ref(trans);
 	}
 	/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
 			iwl_pcie_free_tso_page(trans_pcie, skb);
 		}
 		iwl_pcie_gen2_free_tfd(trans, txq);
-		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
 		if (txq->read_ptr == txq->write_ptr) {
 			unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 	if (!txq)
 		return -ENOMEM;
 	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+				     (trans->cfg->device_family >=
+				      IWL_DEVICE_FAMILY_22560) ?
+				     sizeof(struct iwl_gen3_bc_tbl) :
 				     sizeof(struct iwlagn_scd_bc_tbl));
 	if (ret) {
 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
 	txq->id = qid;
 	trans_pcie->txq[qid] = txq;
-	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
+	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
 
 	/* Place first TFD at index corresponding to start sequence number */
 	txq->read_ptr = wr_ptr;
@@ -71,27 +71,28 @@
  *
  ***************************************************/
 
-int iwl_queue_space(const struct iwl_txq *q)
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
 {
 	unsigned int max;
 	unsigned int used;
 
 	/*
 	 * To avoid ambiguity between empty and completely full queues, there
-	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
-	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+	 * should always be less than max_tfd_queue_size elements in the queue.
+	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
 	 * to reserve any queue entries for this purpose.
 	 */
-	if (q->n_window < TFD_QUEUE_SIZE_MAX)
+	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
 		max = q->n_window;
 	else
-		max = TFD_QUEUE_SIZE_MAX - 1;
+		max = trans->cfg->base_params->max_tfd_queue_size - 1;
 
 	/*
-	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
-	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
+	 * modulo by max_tfd_queue_size and is well defined.
 	 */
-	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+	used = (q->write_ptr - q->read_ptr) &
+	       (trans->cfg->base_params->max_tfd_queue_size - 1);
 
 	if (WARN_ON(used > max))
 		return 0;
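
A quick worked example of the mask-as-modulo arithmetic above, with an assumed max_tfd_queue_size of 256 (values are illustrative):

	/* Illustrative: queue size 256, write pointer has wrapped. */
	unsigned int size = 256;                      /* power of two */
	unsigned int write_ptr = 10, read_ptr = 250;
	unsigned int used = (write_ptr - read_ptr) & (size - 1);
	/* 10 - 250 underflows to 0xFFFFFF10; & 0xFF keeps the low byte: 16.
	 * Exactly the distance from 250 to 10 modulo 256, i.e. 16 entries
	 * in flight. */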
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
 		       int slots_num, bool cmd_queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size *
+		trans->cfg->base_params->max_tfd_queue_size;
 	size_t tb0_buf_sz;
 	int i;
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		      int slots_num, bool cmd_queue)
 {
 	int ret;
+	u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
 
 	txq->need_update = false;
 
-	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	/* max_tfd_queue_size must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
-	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+		      "Max tfd queue size must be a power of two, but is %d",
+		      tfd_queue_max_size))
+		return -EINVAL;
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
 	ret = iwl_queue_init(txq, slots_num);
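
The WARN_ONCE turns the old compile-time assertion into a runtime check, since the queue size is now per-device config rather than a constant. x & (x - 1) clears the lowest set bit, so it is zero exactly for powers of two (and zero). A minimal illustration:

	/* x & (x - 1) clears the lowest set bit:
	 *   256 & 255 = 0x100 & 0x0FF = 0      -> power of two, accepted
	 *   320 & 319 = 0x140 & 0x13F = 0x100  -> nonzero, rejected (-EINVAL)
	 */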
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 			iwl_pcie_free_tso_page(trans_pcie, skb);
 		}
 		iwl_pcie_txq_free_tfd(trans, txq);
-		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
 		if (txq->read_ptr == txq->write_ptr) {
 			unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	/* De-alloc circular buffer of TFDs */
 	if (txq->tfds) {
 		dma_free_coherent(dev,
-				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+				  trans_pcie->tfd_size *
+				  trans->cfg->base_params->max_tfd_queue_size,
 				  txq->tfds, txq->dma_addr);
 		txq->dma_addr = 0;
 		txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	int ret;
 	int txq_id, slots_num;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
 
-	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
-			sizeof(struct iwlagn_scd_bc_tbl);
+	bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		sizeof(struct iwl_gen3_bc_tbl) :
+		sizeof(struct iwlagn_scd_bc_tbl);
 
 	/*It is not allowed to alloc twice, so warn when this happens.
 	 * We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	}
 
 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
-				     scd_bc_tbls_size);
+				     bc_tbls_size);
 	if (ret) {
 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
 		goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
-	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
+	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 	int last_to_free;
 
 	/* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 	}
 
-	if (txq->read_ptr == tfd_num)
+	if (read_ptr == tfd_num)
 		goto out;
 
 	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(tfd_num);
+	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
 
 	if (!iwl_queue_used(txq, last_to_free)) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
+			__func__, txq_id, last_to_free,
+			trans->cfg->base_params->max_tfd_queue_size,
 			txq->write_ptr, txq->read_ptr);
 		goto out;
 	}
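
From here on the reclaim path walks two cursors in lockstep: txq->read_ptr moves through the full TFD ring, while read_ptr is its image in the smaller n_window-sized entries array. A sketch of that mapping, mirroring what iwl_pcie_get_cmd_index() does under the power-of-two assumption the series relies on:

	/* Illustrative: map a ring index into the n_window-sized entries
	 * array; valid because n_window is a power of two. */
	static inline int example_get_cmd_index(const struct iwl_txq *q, int index)
	{
		return index & (q->n_window - 1);
	}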
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 
 	for (;
-	     txq->read_ptr != tfd_num;
-	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
-		int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-		struct sk_buff *skb = txq->entries[idx].skb;
+	     read_ptr != tfd_num;
+	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+		struct sk_buff *skb = txq->entries[read_ptr].skb;
 
 		if (WARN_ON_ONCE(!skb))
 			continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		__skb_queue_tail(skbs, skb);
 
-		txq->entries[idx].skb = NULL;
+		txq->entries[read_ptr].skb = NULL;
 
 		if (!trans->cfg->use_tfh)
 			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	iwl_pcie_txq_progress(txq);
 
-	if (iwl_queue_space(txq) > txq->low_mark &&
+	if (iwl_queue_space(trans, txq) > txq->low_mark &&
 	    test_bit(txq_id, trans_pcie->queue_stopped)) {
 		struct sk_buff_head overflow_skbs;
 
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	}
 	spin_lock_bh(&txq->lock);
 
-	if (iwl_queue_space(txq) > txq->low_mark)
+	if (iwl_queue_space(trans, txq) > txq->low_mark)
 		iwl_wake_queue(trans, txq);
 }
 
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 	struct iwl_txq *txq = trans_pcie->txq[txq_id];
 	unsigned long flags;
 	int nfreed = 0;
+	u16 r;
 
 	lockdep_assert_held(&txq->lock);
 
-	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
+	idx = iwl_pcie_get_cmd_index(txq, idx);
+	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+	if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+	    (!iwl_queue_used(txq, idx))) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
+			__func__, txq_id, idx,
+			trans->cfg->base_params->max_tfd_queue_size,
 			txq->write_ptr, txq->read_ptr);
 		return;
 	}
 
-	for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
-	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+	     r = iwl_queue_inc_wrap(trans, r)) {
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
 
 		if (nfreed++ > 0) {
 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
-				idx, txq->write_ptr, txq->read_ptr);
+				idx, txq->write_ptr, r);
 			iwl_force_nmi(trans);
 		}
 	}
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	spin_lock_bh(&txq->lock);
 
-	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	/* Increment and update queue's write index */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
-	if (iwl_queue_space(txq) < txq->high_mark) {
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
 		iwl_stop_queue(trans, txq);
 
 		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(txq) < 3)) {
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
 			struct iwl_device_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
 	if (!wait_write_ptr)
 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
 