Merge tag 'wireless-drivers-next-for-davem-2017-04-21' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says:

====================
wireless-drivers-next patches for 4.12

Quite a lot of patches for rtlwifi and iwlwifi this time, but changes
also for other active wireless drivers.

Major changes:

ath9k

* add support for Dell Wireless 1601 PCI device
* add debugfs file to manually override noise floor

ath10k

* bump up FW API to 6 for a new QCA6174 firmware branch

wil6210

* support 8 kB RX buffers

iwlwifi

* work to support A000 devices continues
* add support for FW API 30
* add Geographical and Dynamic Specific Absorption Rate (SAR) support
* support a few new PCI device IDs

rtlwifi

* work on adding Bluetooth coexistence support, not finished yet
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -7,6 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
 iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
 iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
 iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
 iwlwifi-objs += iwl-trans.o
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX	17
 #define IWL7265_UCODE_API_MAX	17
-#define IWL7265D_UCODE_API_MAX	28
-#define IWL3168_UCODE_API_MAX	28
+#define IWL7265D_UCODE_API_MAX	30
+#define IWL3168_UCODE_API_MAX	30
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	17
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	28
-#define IWL8265_UCODE_API_MAX	28
+#define IWL8000_UCODE_API_MAX	30
+#define IWL8265_UCODE_API_MAX	30
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN	17
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,10 +55,10 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	28
+#define IWL9000_UCODE_API_MAX	30
 
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN	17
+#define IWL9000_UCODE_API_MIN	30
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION	0x0a1d
@@ -73,14 +73,14 @@
 #define IWL9000_SMEM_LEN	0x68000
 
 #define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
-#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
+#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
 #define IWL9000_MODULE_FIRMWARE(api) \
 	IWL9000_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260_MODULE_FIRMWARE(api) \
-	IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9000LC_MODULE_FIRMWARE(api) \
-	IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260A_MODULE_FIRMWARE(api) \
+	IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9260B_MODULE_FIRMWARE(api) \
+	IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_9000	10
 
@@ -148,7 +148,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 
 const struct iwl_cfg iwl9160_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9160",
-	.fw_name_pre = IWL9260_FW_PRE,
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_next_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -158,7 +159,8 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9260",
-	.fw_name_pre = IWL9260_FW_PRE,
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_next_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -168,7 +170,8 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
 
 const struct iwl_cfg iwl9270_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9270",
-	.fw_name_pre = IWL9260_FW_PRE,
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_next_step = IWL9260B_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
@@ -198,21 +201,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
 	.integrated = true,
 };
 
-/*
- * TODO the struct below is for internal testing only this should be
- * removed by EO 2016~
- */
-const struct iwl_cfg iwl9000lc_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 9000",
-	.fw_name_pre = IWL9000LC_FW_PRE,
-	IWL_DEVICE_9000,
-	.ht_params = &iwl9000_ht_params,
-	.nvm_ver = IWL9000_NVM_VERSION,
-	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-	.integrated = true,
-};
-
 MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX	28
+#define IWL_A000_UCODE_API_MAX	30
 
 /* Lowest firmware API version supported */
 #define IWL_A000_UCODE_API_MIN	24
@@ -65,15 +65,16 @@
 #define IWL_A000_TX_POWER_VERSION	0xffff /* meaningless */
 
 /* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET		0x800000
-#define IWL_A000_DCCM_LEN		0x18000
+#define IWL_A000_DCCM_OFFSET		0x800000 /* LMAC1 */
+#define IWL_A000_DCCM_LEN		0x10000 /* LMAC1 */
 #define IWL_A000_DCCM2_OFFSET		0x880000
 #define IWL_A000_DCCM2_LEN		0x8000
 #define IWL_A000_SMEM_OFFSET		0x400000
-#define IWL_A000_SMEM_LEN		0x68000
+#define IWL_A000_SMEM_LEN		0xD0000
 
-#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
+#define IWL_A000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
+#define IWL_A000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-a0-hrcdb-a0-"
 
 #define IWL_A000_HR_MODULE_FIRMWARE(api) \
 	IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
@@ -121,7 +122,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
 	.vht_mu_mimo_supported = true,				\
 	.mac_addr_from_csr = true,				\
 	.use_tfh = true,					\
-	.rf_id = true
+	.rf_id = true,						\
+	.gen2 = true
 
 const struct iwl_cfg iwla000_2ac_cfg_hr = {
 	.name = "Intel(R) Dual Band Wireless AC a000",
@@ -133,6 +135,17 @@ const struct iwl_cfg iwla000_2ac_cfg_hr = {
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
+const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
+	.name = "Intel(R) Dual Band Wireless AC a000",
+	.fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
+	IWL_DEVICE_A000,
+	.ht_params = &iwl_a000_ht_params,
+	.nvm_ver = IWL_A000_NVM_VERSION,
+	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.cdb = true,
+};
+
 const struct iwl_cfg iwla000_2ac_cfg_jf = {
 	.name = "Intel(R) Dual Band Wireless AC a000",
 	.fw_name_pre = IWL_A000_JF_FW_PRE,
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -90,16 +90,6 @@ enum iwl_device_family {
 	IWL_DEVICE_FAMILY_8000,
 };
 
-static inline bool iwl_has_secure_boot(u32 hw_rev,
-				       enum iwl_device_family family)
-{
-	/* return 1 only for family 8000 B0 */
-	if ((family == IWL_DEVICE_FAMILY_8000) && (hw_rev & 0xC))
-		return true;
-
-	return false;
-}
-
 /*
  * LED mode
  * IWL_LED_DEFAULT: use device default
@@ -283,6 +273,8 @@ struct iwl_pwr_tx_backoff {
  * @fw_name_pre: Firmware filename prefix. The api version and extension
  *	(.ucode) will be added to filename before loading from disk. The
  *	filename is constructed as fw_name_pre<api>.ucode.
+ * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ *	(if supported)
  * @ucode_api_max: Highest version of uCode API supported by driver.
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
@@ -321,6 +313,8 @@ struct iwl_pwr_tx_backoff {
  * @vht_mu_mimo_supported: VHT MU-MIMO support
  * @rf_id: need to read rf_id to determine the firmware image
  * @integrated: discrete or integrated
+ * @gen2: a000 and on transport operation
+ * @cdb: CDB support
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -330,6 +324,7 @@ struct iwl_cfg {
 	/* params specific to an individual device within a device family */
 	const char *name;
 	const char *fw_name_pre;
+	const char *fw_name_pre_next_step;
 	/* params not likely to change within a device family */
 	const struct iwl_base_params *base_params;
 	/* params likely to change within a device family */
@@ -365,7 +360,9 @@ struct iwl_cfg {
 	    vht_mu_mimo_supported:1,
 	    rf_id:1,
 	    integrated:1,
-	    use_tfh:1;
+	    use_tfh:1,
+	    gen2:1,
+	    cdb:1;
 	u8 valid_tx_ant;
 	u8 valid_rx_ant;
 	u8 non_shared_ant;
@@ -449,13 +446,13 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9000lc_2ac_cfg;
 extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
 extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwla000_2ac_cfg_jf;
 #endif /* CONFIG_IWLMVM */
 
new file: drivers/net/wireless/intel/iwlwifi/iwl-context-info.h (203 lines)
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_context_info_file_h__
+#define __iwl_context_info_file_h__
+
+/* maximmum number of DRAM map entries supported by FW */
+#define IWL_MAX_DRAM_ENTRY	64
+#define CSR_CTXT_INFO_BA	0x40
+
+/**
+ * enum iwl_context_info_flags - Context information control flags
+ * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ *	the init done for driver command that configures several system modes
+ * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ *	exponent, the actual size is 2**value, valid sizes are 8-2048.
+ *	The value is four bits long. Maximum valid exponent is 12
+ * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ *	default is short format - not supported by the driver)
+ */
+enum iwl_context_info_flags {
+	IWL_CTXT_INFO_AUTO_FUNC_INIT	= BIT(0),
+	IWL_CTXT_INFO_EARLY_DEBUG	= BIT(1),
+	IWL_CTXT_INFO_ENABLE_CDMP	= BIT(2),
+	IWL_CTXT_INFO_RB_SIZE_4K	= BIT(3),
+	IWL_CTXT_INFO_RB_CB_SIZE_POS	= 4,
+	IWL_CTXT_INFO_TFD_FORMAT_LONG	= BIT(8),
+};
+
+/*
+ * struct iwl_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwl_context_info_version {
+	__le16 mac_id;
+	__le16 version;
+	__le16 size;
+	__le16 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_control - version structure
+ * @control_flags: context information flags see &enum iwl_context_info_flags
+ */
+struct iwl_context_info_control {
+	__le32 control_flags;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwl_context_info_dram {
+	__le64 umac_img[IWL_MAX_DRAM_ENTRY];
+	__le64 lmac_img[IWL_MAX_DRAM_ENTRY];
+	__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwl_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwl_context_info_rbd_cfg {
+	__le64 free_rbd_addr;
+	__le64 used_rbd_addr;
+	__le64 status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwl_context_info_hcmd_cfg  - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwl_context_info_hcmd_cfg {
+	__le64 cmd_queue_addr;
+	u8 cmd_queue_size;
+	u8 reserved[7];
+} __packed;
+
+/*
+ * struct iwl_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwl_context_info_dump_cfg {
+	__le64 core_dump_addr;
+	__le32 core_dump_size;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwl_context_info_pnvm_cfg {
+	__le64 platform_nvm_addr;
+	__le32 platform_nvm_size;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_early_dbg_cfg - early debug configuration for
+ *	dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwl_context_info_early_dbg_cfg {
+	__le64 early_debug_addr;
+	__le32 early_debug_size;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwl_context_info {
+	struct iwl_context_info_version version;
+	struct iwl_context_info_control control;
+	__le64 reserved0;
+	struct iwl_context_info_rbd_cfg rbd_cfg;
+	struct iwl_context_info_hcmd_cfg hcmd_cfg;
+	__le32 reserved1[4];
+	struct iwl_context_info_dump_cfg dump_cfg;
+	struct iwl_context_info_early_dbg_cfg edbg_cfg;
+	struct iwl_context_info_pnvm_cfg pnvm_cfg;
+	__le32 reserved2[16];
+	struct iwl_context_info_dram dram;
+	__le32 reserved3[16];
+} __packed;
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_h__ */
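For orientation, a minimal sketch of how a PCIe transport could fill this context-info block before pointing CSR_CTXT_INFO_BA at it. The flag choices and the helper name are illustrative assumptions, not code from this series (iwl_pcie_ctxt_info_init() does the real work):

	/* sketch: assumes a DMA-coherent ctxt_info allocation already exists */
	static void example_fill_ctxt_info(struct iwl_context_info *ctxt_info)
	{
		u32 control_flags;

		/* 4 KB RBs and a 256-entry RBD circular buffer (2^8) */
		control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
				IWL_CTXT_INFO_RB_SIZE_4K |
				(8 << IWL_CTXT_INFO_RB_CB_SIZE_POS);
		ctxt_info->control.control_flags = cpu_to_le32(control_flags);
		ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); /* DWs */
	}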
@@ -348,7 +348,6 @@ enum {
 
 /* RF_ID value */
 #define CSR_HW_RF_ID_TYPE_JF		(0x00105000)
-#define CSR_HW_RF_ID_TYPE_LC		(0x00101000)
 #define CSR_HW_RF_ID_TYPE_HR		(0x00109000)
 
 /* EEPROM REG */
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -211,24 +211,46 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
 
 static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 {
-	const char *name_pre = drv->trans->cfg->fw_name_pre;
+	const struct iwl_cfg *cfg = drv->trans->cfg;
 	char tag[8];
+	const char *fw_pre_name;
+
+	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+	    CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
+		fw_pre_name = cfg->fw_name_pre_next_step;
+	else
+		fw_pre_name = cfg->fw_name_pre;
 
 	if (first) {
-		drv->fw_index = drv->trans->cfg->ucode_api_max;
+		drv->fw_index = cfg->ucode_api_max;
 		sprintf(tag, "%d", drv->fw_index);
 	} else {
 		drv->fw_index--;
 		sprintf(tag, "%d", drv->fw_index);
 	}
 
-	if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
+	if (drv->fw_index < cfg->ucode_api_min) {
 		IWL_ERR(drv, "no suitable firmware found!\n");
+
+		if (cfg->ucode_api_min == cfg->ucode_api_max) {
+			IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+				cfg->ucode_api_max);
+		} else {
+			IWL_ERR(drv, "minimum version required: %s%d\n",
+				fw_pre_name,
+				cfg->ucode_api_min);
+			IWL_ERR(drv, "maximum version supported: %s%d\n",
+				fw_pre_name,
+				cfg->ucode_api_max);
+		}
+
+		IWL_ERR(drv,
+			"check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
 		return -ENOENT;
 	}
 
 	snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
-		 name_pre, tag);
+		 fw_pre_name, tag);
 
 	IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
 		       drv->firmware_name);
@@ -614,6 +614,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
 #define RX_POOL_SIZE		(MQ_RX_NUM_RBDS +	\
 				 IWL_MAX_RX_HW_QUEUES *	\
 				 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+/* cb size is the exponent */
+#define RX_QUEUE_CB_SIZE(x)	ilog2(x)
 
 #define RX_QUEUE_SIZE	256
 #define RX_QUEUE_MASK	255
@@ -639,6 +641,8 @@ struct iwl_rb_status {
 
 
 #define TFD_QUEUE_SIZE_MAX	(256)
+/* cb size is the exponent - 3 */
+#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
 #define TFD_QUEUE_SIZE_BC_DUP	(64)
 #define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
 #define IWL_TX_DMA_MASK		DMA_BIT_MASK(36)
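Both new macros encode a ring size as a power-of-two exponent for the firmware interface; TFD_QUEUE_CB_SIZE additionally subtracts 3 because the smallest supported queue is 8 (2^3) entries. Worked examples (plain arithmetic on the macros above, not new driver behaviour):

	/* RX_QUEUE_CB_SIZE(512)                 == ilog2(512)     == 9 */
	/* TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX) == ilog2(256) - 3 == 5, i.e. 2^(5+3) = 256 TFDs */
	BUILD_BUG_ON(RX_QUEUE_CB_SIZE(512) != 9);
	BUILD_BUG_ON(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX) != 5);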
@@ -647,7 +651,7 @@ struct iwl_rb_status {
 
 static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
 {
-	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
 }
 /**
  * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
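This hunk (and the iwl_write64 change further down) replaces open-coded shifts with the kernel's upper_32_bits()/lower_32_bits() helpers. The old "(addr >> 16) >> 16" spelling existed because a single ">> 32" is undefined behaviour when dma_addr_t is only 32 bits wide; upper_32_bits() encapsulates exactly that trick. Equivalence sketch (illustrative, not driver code):

	/* old spelling: ((addr >> 16) >> 16)
	 * new spelling: upper_32_bits(addr)
	 * both yield bits 32..63; the & 0xF keeps bits 32..35 of the DMA address */
	u8 hi = (sizeof(dma_addr_t) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;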
@@ -241,6 +241,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *	iteration complete notification, and the timestamp reported for RX
  *	received during scan, are reported in TSF of the mac specified in the
  *	scan request.
+ * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ *	ADD_MODIFY_STA_KEY_API_S_VER_2.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -250,6 +252,7 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
 	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_SCAN_TSF_REPORT	= (__force iwl_ucode_tlv_api_t)28,
+	IWL_UCODE_TLV_API_TKIP_MIC_KEYS		= (__force iwl_ucode_tlv_api_t)29,
 
 	NUM_IWL_UCODE_TLV_API
 #ifdef __CHECKER__
@@ -344,6 +347,8 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
 	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
 	IWL_UCODE_TLV_CAPA_STA_PM_NOTIF			= (__force iwl_ucode_tlv_capa_t)38,
+	IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT		= (__force iwl_ucode_tlv_capa_t)39,
+	IWL_UCODE_TLV_CAPA_CDB_SUPPORT			= (__force iwl_ucode_tlv_capa_t)40,
 	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
 	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,
 	IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)67,
@@ -54,8 +54,8 @@ IWL_EXPORT_SYMBOL(iwl_write32);
 void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
 {
 	trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
-	iwl_trans_write32(trans, ofs, val & 0xffffffff);
-	iwl_trans_write32(trans, ofs + 4, val >> 32);
+	iwl_trans_write32(trans, ofs, lower_32_bits(val));
+	iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
 }
 IWL_EXPORT_SYMBOL(iwl_write64);
 
@@ -76,8 +76,8 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
 }
 IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
 
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
-				  struct iwl_rx_packet *pkt)
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+			   struct iwl_rx_packet *pkt)
 {
 	bool triggered = false;
 
@@ -118,13 +118,11 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
 		}
 	}
 	spin_unlock(&notif_wait->notif_wait_lock);
 
-	if (triggered)
-		wake_up_all(&notif_wait->notif_waitq);
+	return triggered;
 }
-IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
 
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
 {
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -89,10 +90,10 @@ struct iwl_notif_wait_data {
  *
  * This structure is not used directly, to wait for a
  * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
+ * iwl_init_notification_wait() with appropriate
  * parameters. Then do whatever will cause the ucode
  * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
+ * call iwl_wait_notification().
  *
  * Each notification is one-shot. If at some point we
  * need to support multi-shot notifications (which
@@ -114,10 +115,24 @@ struct iwl_notification_wait {
 
 /* caller functions */
 void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
-void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
-				  struct iwl_rx_packet *pkt);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+			   struct iwl_rx_packet *pkt);
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
 
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+	wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+			     struct iwl_rx_packet *pkt)
+{
+	if (iwl_notification_wait(notif_data, pkt))
+		iwl_notification_notify(notif_data);
+}
+
 /* user functions */
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
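The split lets an op-mode test whether a waiter was triggered (iwl_notification_wait) separately from waking the waiters (iwl_notification_notify), while the old combined entry point survives as the inline wrapper above. Caller-side sketch of the unchanged wait pattern; it assumes the mvm op-mode's MVM_ALIVE command ID, and the timeout is a placeholder:

	struct iwl_notification_wait alive_wait;
	static const u16 alive_cmd[] = { MVM_ALIVE };

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   NULL, NULL);
	/* ... start the firmware, which sends the alive notification ... */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, HZ);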
@@ -294,9 +294,6 @@
 
 /*********************** END TX SCHEDULER *************************************/
 
-/* tcp checksum offload */
-#define RX_EN_CSUM		(0x00a00d88)
-
 /* Oscillator clock */
 #define OSC_CLK			(0xa04068)
 #define OSC_CLK_FORCE_CONTROL	(0x8)
@@ -309,6 +306,7 @@
  * Note this address is cleared after MAC reset.
  */
 #define UREG_UCODE_LOAD_STATUS		(0xa05c40)
+#define UREG_CPU_INIT_RUN		(0xa05c44)
 
 #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR	(0x1E78)
 #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR	(0x1E7C)
@@ -316,6 +314,8 @@
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE		(0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE		(0x420400)
 
+#define LMAC2_PRPH_OFFSET		(0x100000)
+
 /* Rx FIFO */
 #define RXF_SIZE_ADDR			(0xa00c88)
 #define RXF_RD_D_SPACE			(0xa00c40)
@@ -378,6 +378,7 @@
 #define RADIO_REG_SYS_MANUAL_DFT_0	0xAD4078
 #define RFIC_REG_RD			0xAD0470
 #define WFPM_CTRL_REG			0xA03030
+#define WFPM_GP2			0xA030B4
 enum {
 	ENABLE_WFPM = BIT(31),
 	WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK	= 0x80000000,
@@ -398,6 +399,8 @@ enum aux_misc_master1_en {
 #define PREG_AUX_BUS_WPROT_0		0xA04CC0
 #define SB_CPU_1_STATUS			0xA01E30
 #define SB_CPU_2_STATUS			0xA01E34
+#define UMAG_SB_CPU_1_STATUS		0xA038C0
+#define UMAG_SB_CPU_2_STATUS		0xA038C4
 
 /* FW chicken bits */
 #define LMPM_CHICK			0xA01FF8
@@ -70,8 +70,7 @@
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
-				  const struct iwl_trans_ops *ops,
-				  size_t dev_cmd_headroom)
+				  const struct iwl_trans_ops *ops)
 {
 	struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP
@@ -90,15 +89,13 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 	trans->dev = dev;
 	trans->cfg = cfg;
 	trans->ops = ops;
-	trans->dev_cmd_headroom = dev_cmd_headroom;
 	trans->num_rx_queues = 1;
 
 	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
 	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
-				  sizeof(struct iwl_device_cmd)
-				  + trans->dev_cmd_headroom,
+				  sizeof(struct iwl_device_cmd),
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
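With the headroom parameter gone, the command pool holds plain struct iwl_device_cmd objects. A hedged sketch of the matching PCIe call site (error handling around it omitted, names of cfg/pdev are placeholders):

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie);
	if (!trans)
		return ERR_PTR(-ENOMEM);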
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -397,6 +397,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
  */
 #define IWL_MAX_HW_QUEUES	32
 #define IWL_MAX_TID_COUNT	8
+#define IWL_MGMT_TID		15
 #define IWL_FRAME_LIMIT		64
 #define IWL_MAX_RX_HW_QUEUES	16
 
@@ -530,6 +531,44 @@ struct iwl_trans_txq_scd_cfg {
 	int frame_limit;
 };
 
+/* Available options for &struct iwl_tx_queue_cfg_cmd */
+enum iwl_tx_queue_cfg_actions {
+	TX_QUEUE_CFG_ENABLE_QUEUE	= BIT(0),
+	TX_QUEUE_CFG_TFD_SHORT_FORMAT	= BIT(1),
+};
+
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: Bit 0 - on enable, off - disable, Bit 1 - short TFD format
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ *	Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+	u8 sta_id;
+	u8 tid;
+	__le16 flags;
+	__le32 cb_size;
+	__le64 byte_cnt_addr;
+	__le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA -TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ */
+struct iwl_tx_queue_cfg_rsp {
+	__le16 queue_number;
+	__le16 flags;
+	__le16 write_pointer;
+	__le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
 /**
  * struct iwl_trans_ops - transport specific operations
  *
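A sketch of filling the new command for a full-size 256-entry queue; the DMA addresses are placeholders, and the cb_size encoding reuses TFD_QUEUE_CB_SIZE from the iwl-fh.h hunk earlier:

	struct iwl_tx_queue_cfg_cmd cmd = {
		.sta_id = sta_id,
		.tid = tid,
		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
		.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX)),
		.byte_cnt_addr = cpu_to_le64(bc_tbl_dma),
		.tfdq_addr = cpu_to_le64(tfd_ring_dma),
	};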
@@ -640,12 +679,16 @@ struct iwl_trans_ops {
 			   unsigned int queue_wdg_timeout);
 	void (*txq_disable)(struct iwl_trans *trans, int queue,
 			    bool configure_scd);
+	/* a000 functions */
+	int (*txq_alloc)(struct iwl_trans *trans,
+			 struct iwl_tx_queue_cfg_cmd *cmd,
+			 int cmd_id,
+			 unsigned int queue_wdg_timeout);
+	void (*txq_free)(struct iwl_trans *trans, int queue);
 
 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
 				    bool shared);
 
-	dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
-
 	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
 				 bool freeze);
@@ -774,9 +817,6 @@ enum iwl_plat_pm_mode {
  *	the transport must set this before calling iwl_drv_start()
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_headroom: room needed for the transport's private use before the
- *	device_cmd for Tx - for internal use only
- *	The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
  *	starting the firmware, used for tracing
  * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
@@ -827,7 +867,6 @@ struct iwl_trans {
 
 	/* The following fields are internal only */
 	struct kmem_cache *dev_cmd_pool;
-	size_t dev_cmd_headroom;
 	char dev_cmd_pool_name[50];
 
 	struct dentry *dbgfs_dir;
@@ -1000,13 +1039,13 @@ iwl_trans_dump_data(struct iwl_trans *trans,
 static inline struct iwl_device_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
-	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+	struct iwl_device_cmd *dev_cmd_ptr =
+		kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
 
 	if (unlikely(dev_cmd_ptr == NULL))
 		return NULL;
 
-	return (struct iwl_device_cmd *)
-			(dev_cmd_ptr + trans->dev_cmd_headroom);
+	return dev_cmd_ptr;
 }
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -1014,9 +1053,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 					 struct iwl_device_cmd *dev_cmd)
 {
-	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
-
-	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1065,6 +1102,34 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
 }
 
+static inline void
+iwl_trans_txq_free(struct iwl_trans *trans, int queue)
+{
+	if (WARN_ON_ONCE(!trans->ops->txq_free))
+		return;
+
+	trans->ops->txq_free(trans, queue);
+}
+
+static inline int
+iwl_trans_txq_alloc(struct iwl_trans *trans,
+		    struct iwl_tx_queue_cfg_cmd *cmd,
+		    int cmd_id,
+		    unsigned int queue_wdg_timeout)
+{
+	might_sleep();
+
+	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
+		return -ENOTSUPP;
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
+}
+
 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
 						 int queue, bool shared_mode)
 {
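Op-mode side, the pairing is allocate-by-command / free-by-queue-number. Hedged usage sketch (the command ID and watchdog timeout are placeholders):

	int queue = iwl_trans_txq_alloc(mvm->trans, &cmd, SCD_QUEUE_CFG,
					wdg_timeout);
	if (queue < 0)
		return queue;
	/* ... on teardown ... */
	iwl_trans_txq_free(mvm->trans, queue);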
@@ -1072,15 +1137,6 @@ static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
 	trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
 }
 
-static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
-						      int queue)
-{
-	/* we should never be called if the trans doesn't support it */
-	BUG_ON(!trans->ops->get_txq_byte_table);
-
-	return trans->ops->get_txq_byte_table(trans, queue);
-}
-
 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 					int fifo, int sta_id, int tid,
 					int frame_limit, u16 ssn,
@@ -1248,8 +1304,7 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
-				  const struct iwl_trans_ops *ops,
-				  size_t dev_cmd_headroom);
+				  const struct iwl_trans_ops *ops);
 void iwl_trans_free(struct iwl_trans *trans);
 
 /*****************************************************
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -82,6 +84,19 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
 	struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
 	int i, ret;
 	u32 status;
+	int size;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+		size = sizeof(cmd);
+		if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
+		    !iwl_mvm_is_cdb_supported(mvm))
+			cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+		else
+			cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+	} else {
+		size = IWL_BINDING_CMD_SIZE_V1;
+	}
 
 	memset(&cmd, 0, sizeof(cmd));
 
@@ -99,7 +114,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
 
 	status = 0;
 	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
-					  sizeof(cmd), &cmd, &status);
+					  size, &cmd, &status);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
 			action, ret);
@@ -756,7 +756,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * Rssi update while not associated - can happen since the statistics
	 * are handled asynchronously
	 */
-	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
 		return;
 
 	/* No BT - reports should be disabled */
@@ -665,6 +665,19 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	struct iwl_binding_cmd binding_cmd = {};
 	struct iwl_time_quota_cmd quota_cmd = {};
 	u32 status;
+	int size;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+		size = sizeof(binding_cmd);
+		if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
+		    !iwl_mvm_is_cdb_supported(mvm))
+			binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+		else
+			binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+	} else {
+		size = IWL_BINDING_CMD_SIZE_V1;
+	}
 
 	/* add back the PHY */
 	if (WARN_ON(!mvmvif->phy_ctxt))
@@ -711,8 +724,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	status = 0;
 	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
-					  sizeof(binding_cmd), &binding_cmd,
-					  &status);
+					  size, &binding_cmd, &status);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
 		return ret;
@@ -986,7 +998,9 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
 			goto out;
 	}
 
-	if (key_data.use_tkip) {
+	if (key_data.use_tkip &&
+	    !fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
 		ret = iwl_mvm_send_cmd_pdu(mvm,
 					   WOWLAN_TKIP_PARAM,
 					   cmd_flags, sizeof(tkip_cmd),
@@ -1194,7 +1208,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
 		/* if we're not associated, this must be netdetect */
 		if (!wowlan->nd_config) {
 			ret = 1;
@@ -2102,6 +2116,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
	 */
 	iwl_mvm_update_changed_regdom(mvm);
 
+	if (!unified_image)
+		/* Re-configure default SAR profile */
+		iwl_mvm_sar_select_profile(mvm, 1, 1);
+
 	if (mvm->net_detect) {
 		/* If this is a non-unified image, we restart the FW,
		 * so no need to stop the netdetect scan.  If that
@@ -280,7 +280,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
 				 mvmvif->queue_params[i].uapsd);
 
 	if (vif->type == NL80211_IFTYPE_STATION &&
-	    ap_sta_id != IWL_MVM_STATION_COUNT) {
+	    ap_sta_id != IWL_MVM_INVALID_STA) {
 		struct iwl_mvm_sta *mvm_sta;
 
 		mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
@@ -330,7 +330,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 
 	mutex_lock(&mvm->mutex);
 
-	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
 		pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
 						lockdep_is_held(&mvm->mutex));
@@ -73,7 +73,9 @@
 #define NUM_MAC_INDEX		(NUM_MAC_INDEX_DRIVER + 1)
 #define NUM_MAC_INDEX_CDB	(NUM_MAC_INDEX_DRIVER + 2)
 
-#define IWL_MVM_STATION_COUNT	16
+#define IWL_MVM_STATION_COUNT	16
+#define IWL_MVM_INVALID_STA	0xFF
 
 #define IWL_MVM_TDLS_STA_COUNT	4
 
 enum iwl_ac {
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -351,6 +351,45 @@ struct iwl_dev_tx_power_cmd {
 	u8 reserved[3];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
 
+#define IWL_NUM_GEO_PROFILES	3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+	IWL_PER_CHAIN_OFFSET_SET_TABLES,
+	IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+};  /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+	__le16 max_tx_power;
+	u8 chain_a;
+	u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+struct iwl_per_chain_offset_group {
+	struct iwl_per_chain_offset lb;
+	struct iwl_per_chain_offset hb;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd {
+	__le32 ops;
+	struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT */
+
 /**
  * struct iwl_beacon_filter_cmd
  * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
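A sketch of sending one geographic table with this new command; the offset values and the surrounding command plumbing are assumptions for illustration:

	struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};

	geo_cmd.table[0].lb.max_tx_power = cpu_to_le16(0x58);
	geo_cmd.table[0].lb.chain_a = 4;	/* per-chain offsets, units assumed */
	geo_cmd.table[0].lb.chain_b = 4;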
@@ -516,7 +516,7 @@ struct iwl_scan_dwell {
  *	scan_config_channel_flag
  * @channel_array: default supported channels
  */
-struct iwl_scan_config {
+struct iwl_scan_config_v1 {
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -532,7 +532,7 @@ struct iwl_scan_config {
 
 #define SCAN_TWO_LMACS 2
 
-struct iwl_scan_config_cdb {
+struct iwl_scan_config {
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -669,7 +669,7 @@ struct iwl_scan_req_umac {
 			u8 n_channels;
 			__le16 reserved;
 			u8 data[];
-		} no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+		} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
 		struct {
 			__le32 max_out_time[SCAN_TWO_LMACS];
 			__le32 suspend_time[SCAN_TWO_LMACS];
@@ -679,13 +679,13 @@ struct iwl_scan_req_umac {
 			u8 n_channels;
 			__le16 reserved;
 			u8 data[];
-		} cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+		} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
 	};
 } __packed;
 
-#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
-				2 * sizeof(__le32))
+#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
+				   2 * sizeof(__le32))
 
 /**
  * struct iwl_umac_scan_abort
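Callers now size the UMAC scan request by API version rather than by CDB naming. Hedged sketch of the size selection (the predicate name is an assumption):

	size_t scan_size = uses_v6_scan_api(mvm) ? IWL_SCAN_REQ_UMAC_SIZE :
						   IWL_SCAN_REQ_UMAC_SIZE_V1;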
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -179,7 +179,7 @@ enum iwl_sta_key_flag {
  * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
  * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
  * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
- * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_trigger_acs
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
  * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
  * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
  * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
@@ -351,10 +351,12 @@ struct iwl_mvm_add_sta_cmd_v7 {
  * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
  *	mac-addr.
  * @beamform_flags: beam forming controls
- * @tfd_queue_msk: tfd queues used by this station
+ * @tfd_queue_msk: tfd queues used by this station.
+ *	Obselete for new TX API (9 and above).
  * @rx_ba_window: aggregation window size
- * @scd_queue_bank: queue bank in used. Each bank contains 32 queues. 0 means
- *	that the queues used by this station are in the first 32.
+ * @sp_length: the size of the SP as it appears in the WME IE
+ * @uapsd_acs:  4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ *	enabled ACs.
  *
 * The device contains an internal table of per-station information, with info
 * on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -384,32 +386,54 @@ struct iwl_mvm_add_sta_cmd {
 	__le16 beamform_flags;
 	__le32 tfd_queue_msk;
 	__le16 rx_ba_window;
-	u8 scd_queue_bank;
-	u8 uapsd_trigger_acs;
-} __packed; /* ADD_STA_CMD_API_S_VER_8 */
+	u8 sp_length;
+	u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_9 */
 
 /**
- * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
  * ( REPLY_ADD_STA_KEY = 0x17 )
  * @sta_id: index of station in uCode's station table
  * @key_offset: key offset in key storage
  * @key_flags: type %iwl_sta_key_flag
  * @key: key material data
  * @rx_secur_seq_cnt: RX security sequence counter for the key
- * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
- * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
  */
-struct iwl_mvm_add_sta_key_cmd {
+struct iwl_mvm_add_sta_key_common {
 	u8 sta_id;
 	u8 key_offset;
 	__le16 key_flags;
 	u8 key[32];
 	u8 rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd_v1 {
+	struct iwl_mvm_add_sta_key_common common;
 	u8 tkip_rx_tsc_byte2;
 	u8 reserved;
 	__le16 tkip_rx_ttak[5];
 } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
 
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwl_mvm_add_sta_key_cmd {
+	struct iwl_mvm_add_sta_key_common common;
+	__le64 rx_mic_key;
+	__le64 tx_mic_key;
+	__le64 transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
 /**
  * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
  * @ADD_STA_SUCCESS: operation was executed successfully
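Since the common part is shared, a caller can build the v2 command and send only a v1-sized payload to older firmware. Hedged sketch keyed off the TKIP_MIC_KEYS API bit added earlier in this series:

	struct iwl_mvm_add_sta_key_cmd cmd = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	int size = new_api ? sizeof(cmd) :
			     sizeof(struct iwl_mvm_add_sta_key_cmd_v1);

	cmd.common.sta_id = sta_id;
	cmd.common.key_offset = key_offset;
	/* ... fill key material, then send `size` bytes of &cmd ... */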
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -32,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -123,6 +124,20 @@ enum iwl_tx_flags {
 	TX_CMD_FLG_HCCA_CHUNK = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ *	to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ *	selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+	IWL_TX_FLAGS_CMD_RATE		= BIT(0),
+	IWL_TX_FLAGS_ENCRYPT_DIS	= BIT(1),
+	IWL_TX_FLAGS_HIGH_PRI		= BIT(2),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
+
 /**
  * enum iwl_tx_pm_timeouts - pm timeout values in TX command
  * @PM_FRAME_NONE: no need to suspend sleep mode
@@ -159,7 +174,7 @@ enum iwl_tx_cmd_sec_ctrl {
|
||||
TX_CMD_SEC_EXT = 0x04,
|
||||
TX_CMD_SEC_GCMP = 0x05,
|
||||
TX_CMD_SEC_KEY128 = 0x08,
|
||||
TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
|
||||
TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
|
||||
};
|
||||
|
||||
/* TODO: how does these values are OK with only 16 bit variable??? */
|
||||
@@ -301,6 +316,31 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_6 */

struct iwl_dram_sec_info {
__le32 pn_low;
__le16 pn_high;
__le16 aux_info;
} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */

/**
 * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
 * ( TX_CMD = 0x1c )
 * @len: in bytes of the payload, see below for details
 * @offload_assist: TX offload configuration
 * @tx_flags: combination of &iwl_tx_cmd_flags
 * @dram_info: FW internal DRAM storage
 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
 *	cleared. Combination of RATE_MCS_*
 */
struct iwl_tx_cmd_gen2 {
__le16 len;
__le16 offload_assist;
__le32 flags;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */

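An illustrative sketch (not part of this patch) of filling the new gen2 TX command; the helper name and the zero offload_assist value are assumptions, the field meanings come from the kernel-doc above:

static void example_fill_gen2_cmd(struct iwl_tx_cmd_gen2 *cmd,
				  u16 payload_len, u32 rate_n_flags)
{
	cmd->len = cpu_to_le16(payload_len);
	cmd->offload_assist = cpu_to_le16(0);
	/* tell the firmware to use the rate carried in this command */
	cmd->flags = cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
	cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
}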
/*
 * TX response related data
 */
@@ -508,9 +548,11 @@ struct agg_tx_status {
 * @tlc_info: TLC rate info
 * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
 * @frame_ctrl: frame control
 * @tx_queue: TX queue for this response
 * @status: for non-agg: frame status TX_STATUS_*
 *	for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
 *	follow this one, up to frame_count.
 *	For version 6, the TX response isn't received for aggregation at all.
 *
 * After the array of statuses comes the SSN of the SCD. Look at
 * %iwl_mvm_get_scd_ssn for more details.
@@ -537,9 +579,17 @@ struct iwl_mvm_tx_resp {
u8 tlc_info;
u8 ra_tid;
__le16 frame_ctrl;

struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_3 */
union {
struct {
struct agg_tx_status status;
} v3; /* TX_RSP_API_S_VER_3 */
struct {
__le16 tx_queue;
__le16 reserved2;
struct agg_tx_status status;
} v6;
};
} __packed; /* TX_RSP_API_S_VER_6 */

/**
 * struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -579,11 +629,14 @@ struct iwl_mvm_ba_notif {
 * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
 * @q_num: TFD queue number
 * @tfd_index: Index of first un-acked frame in the TFD queue
 * @scd_queue: For debug only - the physical queue the TFD queue is bound to
 */
struct iwl_mvm_compressed_ba_tfd {
u8 q_num;
u8 reserved;
__le16 q_num;
__le16 tfd_index;
u8 scd_queue;
u8 reserved;
__le16 reserved2;
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */

/**
@@ -635,6 +688,10 @@ enum iwl_mvm_ba_resp_flags {
 * @tx_rate: the rate the aggregation was sent at
 * @tfd_cnt: number of TFD-Q elements
 * @ra_tid_cnt: number of RATID-Q elements
 * @ba_tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
 *	for details.
 * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
 *	&iwl_mvm_compressed_ba_ratid for more details.
 */
struct iwl_mvm_compressed_ba_notif {
__le32 flags;
@@ -646,6 +703,7 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
__le16 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@@ -754,25 +812,6 @@ struct iwl_tx_path_flush_cmd {
__le16 reserved;
} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */

/**
 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @tx_resp: the Tx response from the fw (agg or non-agg)
 *
 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
 * it can't know that everything will go well until the end of the AMPDU, it
 * can't know in advance the number of MPDUs that will be sent in the current
 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
 * Hence, it can't know in advance what the SSN of the SCD will be at the end
 * of the batch. This is why the SSN of the SCD is written at the end of the
 * whole struct at a variable offset. This function knows how to cope with the
 * variable offset and returns the SSN of the SCD.
 */
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
{
return le32_to_cpup((__le32 *)&tx_resp->status +
tx_resp->frame_count) & 0xfff;
}

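A short usage sketch (not part of this patch) for the variable-offset read that the helper above performs; note the patch removes this pre-existing helper, so this only illustrates the technique, and the surrounding handler is hypothetical:

static void example_handle_tx_resp(struct iwl_mvm_tx_resp *tx_resp)
{
	/* the SSN sits right after frame_count status entries */
	u32 ssn = iwl_mvm_get_scd_ssn(tx_resp);

	pr_debug("batch done, SCD reached SSN %u\n", ssn);
}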
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,

@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -320,12 +320,14 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
GEO_TX_POWER_LIMIT = 0x05,
CT_KILL_NOTIFICATION = 0xFE,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};

enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
INIT_EXTENDED_CFG_CMD = 0x03,
};

enum iwl_data_path_subcmd_ids {
@@ -345,9 +347,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
NVM_ACCESS_COMPLETE = 0x0,
};

enum iwl_fmac_debug_cmds {
enum iwl_debug_cmds {
LMAC_RD_WR = 0x0,
UMAC_RD_WR = 0x1,
MFU_ASSERT_DUMP_NTF = 0xFE,
};

/* command groups */
@@ -673,10 +676,8 @@ struct iwl_error_resp {


/* Common PHY, MAC and Bindings definitions */

#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
#define AUX_BINDING_INDEX (3)

/* Used to extract ID and color from the context dword */
#define FW_CTXT_ID_POS (0)
@@ -689,7 +690,7 @@ struct iwl_error_resp {
(_color << FW_CTXT_COLOR_POS))

/* Possible actions on PHYs, MACs and Bindings */
enum {
enum iwl_phy_ctxt_action {
FW_CTXT_ACTION_STUB = 0,
FW_CTXT_ACTION_ADD,
FW_CTXT_ACTION_MODIFY,
@@ -960,6 +961,7 @@ struct iwl_time_event_notif {
 * @action: action to perform, one of FW_CTXT_ACTION_*
 * @macs: array of MAC id and colors which belong to the binding
 * @phy: PHY id and color which belongs to the binding
 * @lmac_id: the lmac id the binding belongs to
 */
struct iwl_binding_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -968,7 +970,13 @@ struct iwl_binding_cmd {
/* BINDING_DATA_API_S_VER_1 */
__le32 macs[MAX_MACS_IN_BINDING];
__le32 phy;
} __packed; /* BINDING_CMD_API_S_VER_1 */
/* BINDING_CMD_API_S_VER_1 */
__le32 lmac_id;
} __packed; /* BINDING_CMD_API_S_VER_2 */

#define IWL_BINDING_CMD_SIZE_V1 offsetof(struct iwl_binding_cmd, lmac_id)
#define IWL_LMAC_24G_INDEX 0
#define IWL_LMAC_5G_INDEX 1

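A minimal sketch (not part of this patch) showing why IWL_BINDING_CMD_SIZE_V1 exists: a v1 firmware must receive the command without the new lmac_id field. The "cdb" flag is an assumption standing in for the real capability check:

static int example_send_binding(struct iwl_mvm *mvm,
				struct iwl_binding_cmd *cmd, bool cdb)
{
	u32 size = cdb ? sizeof(*cmd) : IWL_BINDING_CMD_SIZE_V1;

	if (cdb)
		cmd->lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);

	return iwl_mvm_send_cmd_pdu(mvm, BINDING_CONTEXT_CMD, 0, size, cmd);
}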
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
@@ -990,6 +998,9 @@ struct iwl_time_quota_data {
 * struct iwl_time_quota_cmd - configuration of time quota between bindings
 * ( TIME_QUOTA_CMD = 0x2c )
 * @quotas: allocations per binding
 * Note: on non-CDB the fourth one is the auxiliary mac and is
 *	essentially zero.
 *	On CDB the fourth one is a regular binding.
 */
struct iwl_time_quota_cmd {
struct iwl_time_quota_data quotas[MAX_BINDINGS];
@@ -1230,6 +1241,25 @@ struct iwl_mfuart_load_notif {
__le32 image_size;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/

/**
 * struct iwl_mfu_assert_dump_notif - mfuart dump logs
 * ( MFU_ASSERT_DUMP_NTF = 0xfe )
 * @assert_id: mfuart assert id that caused the notif
 * @curr_reset_num: number of asserts since uptime
 * @index_num: current chunk id
 * @parts_num: total number of chunks
 * @data_size: number of data bytes sent
 * @data: data buffer
 */
struct iwl_mfu_assert_dump_notif {
__le32 assert_id;
__le32 curr_reset_num;
__le16 index_num;
__le16 parts_num;
__le32 data_size;
__le32 data[0];
} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/

/**
 * struct iwl_set_calib_default_cmd - set default value for calibration.
 * ( SET_CALIB_DEFAULT_CMD = 0x8e )
@@ -1998,19 +2028,48 @@ struct iwl_shared_mem_cfg_v1 {
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */

/**
 * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
 *
 * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
 * @txfifo_size: size of TX FIFOs
 * @rxfifo1_addr: RXF1 addr
 * @rxfifo1_size: RXF1 size
 */
struct iwl_shared_mem_lmac_cfg {
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM];
__le32 rxfifo1_addr;
__le32 rxfifo1_size;

} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */

/**
 * Shared memory configuration information from the FW
 *
 * @shared_mem_addr: shared memory address
 * @shared_mem_size: shared memory size
 * @sample_buff_addr: internal sample (mon/adc) buff addr
 * @sample_buff_size: internal sample buff size
 * @rxfifo2_addr: start addr of RXF2
 * @rxfifo2_size: size of RXF2
 * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
 *	when paging is not supported this should be 0
 * @page_buff_size: size of %page_buff_addr
 * @lmac_num: number of LMACs (1 or 2)
 * @lmac_smem: per-LMAC smem data
 */
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM];
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 rxfifo2_addr;
__le32 rxfifo2_size;
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
__le32 lmac_num;
struct iwl_shared_mem_lmac_cfg lmac_smem[2];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */

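An illustrative sketch (not part of this patch) of walking the per-LMAC FIFO sizes from the v3 layout above; it mirrors the parsing added later in this patch, and the pr_debug() output is a placeholder:

static void example_walk_lmac_cfg(struct iwl_shared_mem_cfg *cfg)
{
	int lmac, i;
	int num = le32_to_cpu(cfg->lmac_num);

	for (lmac = 0; lmac < num && lmac < 2; lmac++) {
		struct iwl_shared_mem_lmac_cfg *lcfg = &cfg->lmac_smem[lmac];

		for (i = 0; i < TX_FIFO_MAX_NUM; i++)
			pr_debug("lmac %d txfifo[%d] = %u\n", lmac, i,
				 le32_to_cpu(lcfg->txfifo_size[i]));
	}
}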
/**
@@ -2178,4 +2237,26 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */

/**
 * enum iwl_extended_cfg_flags - commands driver may send before
 *	finishing init flow
 * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
 * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
 * @IWL_INIT_PHY: driver is going to send PHY_DB commands
 */
enum iwl_extended_cfg_flags {
IWL_INIT_DEBUG_CFG,
IWL_INIT_NVM,
IWL_INIT_PHY,
};

/**
 * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
 *	before finishing init flows
 * @init_flags: values from iwl_extended_cfg_flags
 */
struct iwl_init_extended_cfg_cmd {
__le32 init_flags;
} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */

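The flags above are bit positions rather than masks, so a driver that plans to send both NVM and PHY_DB commands before finishing init would OR the bits together; a minimal sketch (not part of this patch):

struct iwl_init_extended_cfg_cmd example_init_cfg = {
	.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM) | BIT(IWL_INIT_PHY)),
};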
#endif /* __fw_api_h__ */

@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -99,10 +99,120 @@ static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm,
iwl_trans_release_nic_access(mvm->trans, &flags);
}

static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
u32 fifo_len;
int i;

fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = size;

/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
return;

/* Add a TLV for the RXF */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_D_SPACE + offset));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_WR_PTR + offset));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_RD_PTR + offset));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_FENCE_PTR + offset));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_SET_FENCE_MODE + offset));

/* Lock fence */
iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1);
/* Set fence pointer to the same place as the WR pointer */
iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1);
/* Set fence offset */
iwl_trans_write_prph(mvm->trans,
RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);

/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (i = 0; i < fifo_len; i++)
fifo_data[i] = iwl_trans_read_prph(mvm->trans,
RXF_FIFO_RD_FENCE_INC +
offset);
*dump_data = iwl_fw_error_next_data(*dump_data);
}

static void iwl_mvm_dump_txf(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data,
int size, u32 offset, int fifo_num)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
u32 *fifo_data;
u32 fifo_len;
int i;

fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = size;

/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
return;

/* Add a TLV for the FIFO */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FIFO_ITEM_CNT + offset));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_WR_PTR + offset));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_RD_PTR + offset));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FENCE_PTR + offset));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_LOCK_FENCE + offset));

/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset,
TXF_WR_PTR + offset);

/* Dummy-read to advance the read pointer to the head */
iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset);

/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (i = 0; i < fifo_len; i++)
fifo_data[i] = iwl_trans_read_prph(mvm->trans,
TXF_READ_MODIFY_DATA +
offset);
*dump_data = iwl_fw_error_next_data(*dump_data);
}

static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
struct iwl_fw_error_dump_data **dump_data)
{
struct iwl_fw_error_dump_fifo *fifo_hdr;
struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg;
u32 *fifo_data;
u32 fifo_len;
unsigned long flags;
@@ -111,126 +221,47 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
return;

/* Pull RXF data from all RXFs */
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
/*
 * Keep aside the additional offset that might be needed for
 * the next RXF
 */
u32 offset_diff = RXF_DIFF_FROM_PREV * i;
/* Pull RXF1 */
iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
/* Pull RXF2 */
iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size,
RXF_DIFF_FROM_PREV, 1);
/* Pull LMAC2 RXF1 */
if (mvm->smem_cfg.num_lmacs > 1)
iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size,
LMAC2_PRPH_OFFSET, 2);

fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];

/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;

/* Add a TLV for the RXF */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_D_SPACE +
offset_diff));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_WR_PTR +
offset_diff));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_RD_PTR +
offset_diff));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_RD_FENCE_PTR +
offset_diff));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
RXF_SET_FENCE_MODE +
offset_diff));

/* Lock fence */
iwl_trans_write_prph(mvm->trans,
RXF_SET_FENCE_MODE + offset_diff, 0x1);
/* Set fence pointer to the same place as the WR pointer */
iwl_trans_write_prph(mvm->trans,
RXF_LD_WR2FENCE + offset_diff, 0x1);
/* Set fence offset */
iwl_trans_write_prph(mvm->trans,
RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
0x0);

/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] = iwl_trans_read_prph(mvm->trans,
RXF_FIFO_RD_FENCE_INC +
offset_diff);
*dump_data = iwl_fw_error_next_data(*dump_data);
}

/* Pull TXF data from all TXFs */
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
/* Pull TXF data from LMAC1 */
for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i],
0, i);
}

fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.txfifo_size[i];

/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;

/* Add a TLV for the FIFO */
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FIFO_ITEM_CNT));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_WR_PTR));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_RD_PTR));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_FENCE_PTR));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_LOCK_FENCE));

/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
TXF_WR_PTR);

/* Dummy-read to advance the read pointer to the head */
iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);

/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] = iwl_trans_read_prph(mvm->trans,
TXF_READ_MODIFY_DATA);
*dump_data = iwl_fw_error_next_data(*dump_data);
/* Pull TXF data from LMAC2 */
if (mvm->smem_cfg.num_lmacs > 1) {
for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans,
TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
i);
iwl_mvm_dump_txf(mvm, dump_data,
cfg->lmac[1].txfifo_size[i],
LMAC2_PRPH_OFFSET,
i + cfg->num_txfifo_entries);
}
}

if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++) {
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
fifo_len = mvm->smem_cfg.internal_txfifo_size[i];

/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
@@ -246,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,

/* Mark the number of TXF we're pulling now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
mvm->smem_cfg.num_txfifo_entries);

fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@@ -553,31 +584,45 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)

/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg;

fifo_data_len = 0;

/* Count RXF size */
for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
if (!mem_cfg->rxfifo_size[i])
continue;

/* Count RXF2 size */
if (mem_cfg->rxfifo2_size) {
/* Add header info */
fifo_data_len += mem_cfg->rxfifo_size[i] +
fifo_data_len += mem_cfg->rxfifo2_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}

for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
if (!mem_cfg->txfifo_size[i])
/* Count RXF1 sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
if (!mem_cfg->lmac[i].rxfifo1_size)
continue;

/* Add header info */
fifo_data_len += mem_cfg->txfifo_size[i] +
fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}

/* Count TXF sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;

for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
if (!mem_cfg->lmac[i].txfifo_size[j])
continue;

/* Add header info */
fifo_data_len +=
mem_cfg->lmac[i].txfifo_size[j] +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}

if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;

@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -271,6 +272,27 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
return 0;
}

void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
__le32 *dump_data = mfu_dump_notif->data;
int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
int i;

if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));

for (i = 0; i < n_words; i++)
IWL_DEBUG_INFO(mvm,
"MFUART assert dump, dword %u: 0x%08x\n",
le16_to_cpu(mfu_dump_notif->index_num) *
n_words + i,
le32_to_cpu(dump_data[i]));
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
const struct fw_img *image)
{
@@ -617,11 +639,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (ret) {
if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
struct iwl_trans *trans = mvm->trans;

if (trans->cfg->gen2)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS));
mvm->cur_ucode = old_type;
return ret;
}
@@ -807,6 +836,9 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait init_wait;
struct iwl_nvm_access_complete_cmd nvm_complete = {};
struct iwl_init_extended_cfg_cmd init_cfg = {
.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
};
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
};
@@ -828,10 +860,14 @@ int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}

/* TODO: remove when integrating context info */
ret = iwl_mvm_init_paging(mvm);
/* Send init config command to mark that we are sending NVM access
 * commands
 */
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
INIT_EXTENDED_CFG_CMD), 0,
sizeof(init_cfg), &init_cfg);
if (ret) {
IWL_ERR(mvm, "Failed to init paging: %d\n",
IWL_ERR(mvm, "Failed to run init config command: %d\n",
ret);
goto error;
}
@@ -876,24 +912,27 @@ static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i;
int i, lmac;
int lmac_num = le32_to_cpu(mem_cfg->lmac_num);

mvm->shared_mem_cfg.num_txfifo_entries =
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
return;

BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
mvm->smem_cfg.num_lmacs = lmac_num;
mvm->smem_cfg.num_txfifo_entries =
ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);

for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i++)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
for (lmac = 0; lmac < lmac_num; lmac++) {
struct iwl_shared_mem_lmac_cfg *lmac_cfg =
&mem_cfg->lmac_smem[lmac];

for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
le32_to_cpu(lmac_cfg->txfifo_size[i]);
mvm->smem_cfg.lmac[lmac].rxfifo1_size =
le32_to_cpu(lmac_cfg->rxfifo1_size);
}
}

static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
@@ -902,25 +941,27 @@ static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
int i;

mvm->shared_mem_cfg.num_txfifo_entries =
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
mvm->smem_cfg.num_lmacs = 1;

mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
mvm->smem_cfg.lmac[0].txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);

mvm->smem_cfg.lmac[0].rxfifo1_size =
le32_to_cpu(mem_cfg->rxfifo_size[0]);
mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);

/* new API has more data, from rxfifo_addr field and on */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));

for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
i++)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
mvm->smem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
|
||||
sizeof(cmd), &cmd);
|
||||
}
|
||||
|
||||
#define ACPI_WRDS_METHOD "WRDS"
|
||||
#define ACPI_WRDS_WIFI (0x07)
|
||||
#define ACPI_WRDS_TABLE_SIZE 10
|
||||
|
||||
struct iwl_mvm_sar_table {
|
||||
bool enabled;
|
||||
u8 values[ACPI_WRDS_TABLE_SIZE];
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
|
||||
struct iwl_mvm_sar_table *sar_table)
|
||||
#define ACPI_WRDS_METHOD "WRDS"
|
||||
#define ACPI_EWRD_METHOD "EWRD"
|
||||
#define ACPI_WGDS_METHOD "WGDS"
|
||||
#define ACPI_WIFI_DOMAIN (0x07)
|
||||
#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
|
||||
#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
|
||||
IWL_MVM_SAR_TABLE_SIZE + 3)
|
||||
#define ACPI_WGDS_WIFI_DATA_SIZE 18
|
||||
#define ACPI_WGDS_NUM_BANDS 2
|
||||
#define ACPI_WGDS_TABLE_SIZE 3
|
||||
|
||||
static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
|
||||
union acpi_object *table,
|
||||
struct iwl_mvm_sar_profile *profile,
|
||||
bool enabled)
|
||||
{
|
||||
union acpi_object *data_pkg;
|
||||
u32 i;
|
||||
int i;
|
||||
|
||||
/* We need at least two packages, one for the revision and one
|
||||
* for the data itself. Also check that the revision is valid
|
||||
* (i.e. it is an integer set to 0).
|
||||
*/
|
||||
if (wrds->type != ACPI_TYPE_PACKAGE ||
|
||||
wrds->package.count < 2 ||
|
||||
wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
|
||||
wrds->package.elements[0].integer.value != 0) {
|
||||
IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
profile->enabled = enabled;
|
||||
|
||||
/* loop through all the packages to find the one for WiFi */
|
||||
for (i = 1; i < wrds->package.count; i++) {
|
||||
union acpi_object *domain;
|
||||
|
||||
data_pkg = &wrds->package.elements[i];
|
||||
|
||||
/* Skip anything that is not a package with the right
|
||||
* amount of elements (i.e. domain_type,
|
||||
* enabled/disabled plus the sar table size.
|
||||
*/
|
||||
if (data_pkg->type != ACPI_TYPE_PACKAGE ||
|
||||
data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
|
||||
continue;
|
||||
|
||||
domain = &data_pkg->package.elements[0];
|
||||
if (domain->type == ACPI_TYPE_INTEGER &&
|
||||
domain->integer.value == ACPI_WRDS_WIFI)
|
||||
break;
|
||||
|
||||
data_pkg = NULL;
|
||||
}
|
||||
|
||||
if (!data_pkg)
|
||||
return -ENOENT;
|
||||
|
||||
if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
|
||||
return -EINVAL;
|
||||
|
||||
sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
|
||||
|
||||
for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
|
||||
union acpi_object *entry;
|
||||
|
||||
entry = &data_pkg->package.elements[i + 2];
|
||||
if ((entry->type != ACPI_TYPE_INTEGER) ||
|
||||
(entry->integer.value > U8_MAX))
|
||||
for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
|
||||
if ((table[i].type != ACPI_TYPE_INTEGER) ||
|
||||
(table[i].integer.value > U8_MAX))
|
||||
return -EINVAL;
|
||||
|
||||
sar_table->values[i] = entry->integer.value;
|
||||
profile->table[i] = table[i].integer.value;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sar_table *sar_table)
|
||||
static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
|
||||
union acpi_object *data,
|
||||
int data_size)
|
||||
{
|
||||
int i;
|
||||
union acpi_object *wifi_pkg;
|
||||
|
||||
/*
|
||||
* We need at least two packages, one for the revision and one
|
||||
* for the data itself. Also check that the revision is valid
|
||||
* (i.e. it is an integer set to 0).
|
||||
*/
|
||||
if (data->type != ACPI_TYPE_PACKAGE ||
|
||||
data->package.count < 2 ||
|
||||
data->package.elements[0].type != ACPI_TYPE_INTEGER ||
|
||||
data->package.elements[0].integer.value != 0) {
|
||||
IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
/* loop through all the packages to find the one for WiFi */
|
||||
for (i = 1; i < data->package.count; i++) {
|
||||
union acpi_object *domain;
|
||||
|
||||
wifi_pkg = &data->package.elements[i];
|
||||
|
||||
/* Skip anything that is not a package with the right
|
||||
* amount of elements (i.e. domain_type,
|
||||
* enabled/disabled plus the actual data size.
|
||||
*/
|
||||
if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
|
||||
wifi_pkg->package.count != data_size)
|
||||
continue;
|
||||
|
||||
domain = &wifi_pkg->package.elements[0];
|
||||
if (domain->type == ACPI_TYPE_INTEGER &&
|
||||
domain->integer.value == ACPI_WIFI_DOMAIN)
|
||||
break;
|
||||
|
||||
wifi_pkg = NULL;
|
||||
}
|
||||
|
||||
if (!wifi_pkg)
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
return wifi_pkg;
|
||||
}
|
||||
|
||||
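
A usage sketch (not part of this patch): the expected ACPI layout is { revision, { domain, enabled, data... }, ... }, so callers hand the whole evaluated object plus the per-domain element count to the finder; the wrapper below is hypothetical:

static int example_check_wrds(struct iwl_mvm *mvm, union acpi_object *obj)
{
	union acpi_object *wifi_pkg =
		iwl_mvm_sar_find_wifi_pkg(mvm, obj,
					  ACPI_WRDS_WIFI_DATA_SIZE);

	return IS_ERR(wifi_pkg) ? PTR_ERR(wifi_pkg) : 0;
}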
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *table;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
bool enabled;
int ret;

root_handle = ACPI_HANDLE(mvm->dev);
@@ -1072,62 +1122,301 @@ static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
return -ENOENT;
}

ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
kfree(wrds.pointer);
wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
ACPI_WRDS_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}

if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out_free;
}

enabled = !!(wifi_pkg->package.elements[1].integer.value);

/* position of the actual table */
table = &wifi_pkg->package.elements[2];

/* The profile from WRDS is officially profile 1, but goes
 * into sar_profiles[0] (because we don't have a profile 0).
 */
ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
enabled);

out_free:
kfree(wrds.pointer);
return ret;
}
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
struct iwl_mvm_sar_table *sar_table)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
bool enabled;
int i, n_profiles, ret;

root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_RADIO(mvm,
"Could not retrieve root port ACPI handle\n");
return -ENOENT;
}

/* Get the method's handle */
status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
&handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
return -ENOENT;
}

/* Call EWRD with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
return -ENOENT;
}

wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
ACPI_EWRD_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}

if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
(wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
ret = -EINVAL;
goto out_free;
}

enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;

for (i = 0; i < n_profiles; i++) {
/* the tables start at element 3 */
static int pos = 3;

/* The EWRD profiles officially go from 2 to 4, but we
 * save them in sar_profiles[1-3] (because we don't
 * have profile 0). So in the array we start from 1.
 */
ret = iwl_mvm_sar_set_profile(mvm,
&wifi_pkg->package.elements[pos],
&mvm->sar_profiles[i + 1],
enabled);
if (ret < 0)
break;

/* go to the next table */
pos += IWL_MVM_SAR_TABLE_SIZE;
}

out_free:
kfree(ewrd.pointer);
return ret;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm,
struct iwl_mvm_geo_table *geo_table)
{
union acpi_object *wifi_pkg;
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
int i, ret;

root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_RADIO(mvm,
"Could not retrieve root port ACPI handle\n");
return -ENOENT;
}

/* Get the method's handle */
status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
&handle);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
return -ENOENT;
}

/* Call WGDS with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
if (ACPI_FAILURE(status)) {
IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
return -ENOENT;
}

wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
ACPI_WGDS_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}

for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) {
union acpi_object *entry;

entry = &wifi_pkg->package.elements[i + 1];
if ((entry->type != ACPI_TYPE_INTEGER) ||
(entry->integer.value > U8_MAX))
return -EINVAL;

geo_table->values[i] = entry->integer.value;
}
ret = 0;
out_free:
kfree(wgds.pointer);
return ret;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret, i, j, idx;
int i, j, idx;
int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
int len = sizeof(cmd);

BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
IWL_MVM_SAR_TABLE_SIZE);

if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);

ret = iwl_mvm_sar_get_table(mvm, &sar_table);
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
struct iwl_mvm_sar_profile *prof;

/* don't allow SAR to be disabled (profile 0 means disable) */
if (profs[i] == 0)
return -EPERM;

/* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
return -EINVAL;

/* profiles go from 1 to 4, so decrement to access the array */
prof = &mvm->sar_profiles[profs[i] - 1];

/* if the profile is disabled, do nothing */
if (!prof->enabled) {
IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
profs[i]);
/* if one of the profiles is disabled, we fail all */
return -ENOENT;
}

IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
idx = (i * IWL_NUM_SUB_BANDS) + j;
cmd.v3.per_chain_restriction[i][j] =
cpu_to_le16(prof->table[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
j, prof->table[idx]);
}
}

IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

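A short usage sketch (not part of this patch): profiles are numbered from 1, so selecting the WRDS profile for both chains looks like the wrapper below; iwl_mvm_sar_init() later in this patch does exactly this as its default:

static int example_select_default_sar(struct iwl_mvm *mvm)
{
	/* profile 1 (WRDS) for both chain A and chain B */
	return iwl_mvm_sar_select_profile(mvm, 1, 1);
}
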
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
struct iwl_mvm_geo_table geo_table;
struct iwl_geo_tx_power_profiles_cmd cmd = {
.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
};
int ret, i, j, idx;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);

ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"SAR BIOS table invalid or unavailable. (%d)\n",
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* we don't fail if the table is not available */
return 0;
}

if (!sar_table.enabled)
return 0;
IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");

IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);

BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
ACPI_WRDS_TABLE_SIZE);
for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
struct iwl_per_chain_offset *chain =
(struct iwl_per_chain_offset *)&cmd.table[i];

for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
idx = (i * IWL_NUM_SUB_BANDS) + j;
cmd.v3.per_chain_restriction[i][j] =
cpu_to_le16(sar_table.values[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
j, sar_table.values[idx]);
for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
u8 *value;

idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE +
j * ACPI_WGDS_TABLE_SIZE;
value = &geo_table.values[idx];
chain[j].max_tx_power = cpu_to_le16(value[0]);
chain[j].chain_a = value[1];
chain[j].chain_b = value[2];
IWL_DEBUG_RADIO(mvm,
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
i, j, value[1], value[2], value[0]);
}
}
return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}

ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
return -ENOENT;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
return 0;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
int ret;

ret = iwl_mvm_sar_get_wrds_table(mvm);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
ret);
/* if not available, don't fail and don't bother with EWRD */
return 0;
}

ret = iwl_mvm_sar_get_ewrd_table(mvm);
/* if EWRD is not available, we can still use WRDS, so don't fail */
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
ret);

/* choose profile 1 (WRDS) as default for both chains */
ret = iwl_mvm_sar_select_profile(mvm, 1, 1);

/* if we don't have profile 0 from BIOS, just skip it */
if (ret == -ENOENT)
return 0;

return ret;
}
@@ -1219,7 +1508,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}

/* Init RSS configuration */
if (iwl_mvm_has_new_rx_api(mvm)) {
/* TODO - remove a000 disablement when we have RXQ config API */
if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1229,10 +1519,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}

/* init the fw <-> mac80211 STA mapping */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -1313,10 +1603,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}

if (iwl_mvm_is_csum_supported(mvm) &&
mvm->cfg->features & NETIF_F_RXCSUM)
iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1325,6 +1611,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;

ret = iwl_mvm_sar_geo_init(mvm);
if (ret)
goto error;

IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -1362,7 +1652,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;

/* init the fw <-> mac80211 STA mapping */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

/* Add auxiliary station for scanning */

@@ -472,8 +472,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}

mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@@ -1442,6 +1443,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *beacon_notify_hdr;
struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif;
struct agg_tx_status *agg_status;
u16 status;

lockdep_assert_held(&mvm->mutex);
@@ -1449,7 +1451,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
beacon_notify_hdr = &beacon->beacon_notify_hdr;
mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);

status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame,

@@ -6,8 +6,8 @@
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +33,8 @@
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -766,7 +767,7 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;

mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
mvmsta->sta_id != mvm->d0i3_ap_sta_id)
goto out;

@@ -1010,7 +1011,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

mvmvif->uploaded = false;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
@@ -1053,7 +1054,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

mvm->p2p_device_vif = NULL;
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;

iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -1351,6 +1352,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}

if (iwl_mvm_is_dqa_supported(mvm)) {
/*
 * Only queue for this station is the mcast queue,
 * which shouldn't be in TFD mask anyway
 */
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
0, vif->type);
if (ret)
goto out_release;
}

iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
}
@@ -1516,6 +1528,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
iwl_mvm_dealloc_bcast_sta(mvm, vif);
goto out_release;
}
@@ -1952,7 +1965,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_MVM_SMPS_REQ_PROT,
IEEE80211_SMPS_DYNAMIC);
}
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
/*
 * If update fails - SF might be running in associated
 * mode while disassociated - which is forbidden.
@@ -1966,8 +1979,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to remove AP station\n");

if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, false, NULL);
if (ret)
@@ -2104,6 +2117,10 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_unbind;

ret = iwl_mvm_add_mcast_sta(mvm, vif);
if (ret)
goto out_rm_bcast;

/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;

@@ -2131,6 +2148,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
out_quota_failed:
iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_rm_mcast_sta(mvm, vif);
out_rm_bcast:
iwl_mvm_send_rm_bcast_sta(mvm, vif);
out_unbind:
iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2177,6 +2196,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);

iwl_mvm_update_quotas(mvm, false, NULL);
iwl_mvm_rm_mcast_sta(mvm, vif);
iwl_mvm_send_rm_bcast_sta(mvm, vif);
iwl_mvm_binding_remove_vif(mvm, vif);

@@ -2343,6 +2363,9 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;

if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE)
continue;

__set_bit(tid_data->txq_id, &txqs);

if (iwl_mvm_tid_queued(tid_data) == 0)
@@ -2368,7 +2391,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 */
break;
case STA_NOTIFY_AWAKE:
if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
break;

if (txqs)
@@ -3939,7 +3962,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);

/* flush the AP-station and all TDLS peers */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -4196,7 +4219,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,

lockdep_assert_held(&mvm->mutex);

if (!iwl_mvm_has_new_rx_api(mvm))
/* TODO - remove a000 disablement when we have RXQ config API */
if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
return;

notif->cookie = mvm->queue_sync_cookie;
|
@@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@@ -34,7 +34,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@@ -407,6 +407,7 @@ struct iwl_mvm_vif {
|
||||
struct iwl_mvm_time_event_data hs_time_event_data;
|
||||
|
||||
struct iwl_mvm_int_sta bcast_sta;
|
||||
struct iwl_mvm_int_sta mcast_sta;
|
||||
|
||||
/*
|
||||
* Assigned while mac80211 has the interface in a channel context,
|
||||
@@ -603,10 +604,15 @@ enum iwl_mvm_tdls_cs_state {
|
||||
IWL_MVM_TDLS_SW_ACTIVE,
|
||||
};
|
||||
|
||||
#define MAX_NUM_LMAC 2
|
||||
struct iwl_mvm_shared_mem_cfg {
|
||||
int num_lmacs;
|
||||
int num_txfifo_entries;
|
||||
u32 txfifo_size[TX_FIFO_MAX_NUM];
|
||||
u32 rxfifo_size[RX_FIFO_MAX_NUM];
|
||||
struct {
|
||||
u32 txfifo_size[TX_FIFO_MAX_NUM];
|
||||
u32 rxfifo1_size;
|
||||
} lmac[MAX_NUM_LMAC];
|
||||
u32 rxfifo2_size;
|
||||
u32 internal_txfifo_addr;
|
||||
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
|
||||
};
|
||||
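The hunk above reworks iwl_mvm_shared_mem_cfg so that the TX FIFO sizes (and RX FIFO 1) are tracked per LMAC, since CDB hardware carries two of them. A minimal sketch of how a notification parser might fill the per-LMAC entries; the function name and the flat __le32 source layout are assumptions for illustration, not the driver's actual parser:

/* Illustrative only: populate the reworked per-LMAC config.
 * "fifo_sizes" is assumed to be a flat little-endian array from FW.
 */
static void sketch_parse_smem(struct iwl_mvm_shared_mem_cfg *cfg,
			      const __le32 *fifo_sizes, int num_lmacs)
{
	int lmac, i;

	cfg->num_lmacs = min(num_lmacs, MAX_NUM_LMAC);
	for (lmac = 0; lmac < cfg->num_lmacs; lmac++)
		for (i = 0; i < TX_FIFO_MAX_NUM; i++)
			cfg->lmac[lmac].txfifo_size[i] =
				le32_to_cpu(fifo_sizes[lmac * TX_FIFO_MAX_NUM + i]);
}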
@@ -625,6 +631,7 @@ struct iwl_mvm_shared_mem_cfg {
* @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
* @mvm: mvm pointer, needed for frame timer context
*/
@@ -640,6 +647,7 @@ struct iwl_mvm_reorder_buffer {
unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
bool valid;
spinlock_t lock;
struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
@@ -709,6 +717,21 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
#define IWL_MVM_NUM_CIPHERS 10

#ifdef CONFIG_ACPI
#define IWL_MVM_SAR_TABLE_SIZE 10
#define IWL_MVM_SAR_PROFILE_NUM 4
#define IWL_MVM_GEO_TABLE_SIZE 18

struct iwl_mvm_sar_profile {
bool enabled;
u8 table[IWL_MVM_SAR_TABLE_SIZE];
};

struct iwl_mvm_geo_table {
u8 values[IWL_MVM_GEO_TABLE_SIZE];
};
#endif

struct iwl_mvm {
/* for logger access */
struct device *dev;
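The SAR definitions above back the new ACPI-driven Specific Absorption Rate support: each of the four profiles holds a ten-entry power table plus an enabled flag. A minimal sketch, assuming a hypothetical helper name, of how a table read from ACPI might be validated and stored into one of these profiles:

/* Hypothetical helper: copy one SAR table parsed from ACPI into a
 * profile slot and mark it usable; reject malformed input.
 */
static int sketch_store_sar_profile(struct iwl_mvm_sar_profile *prof,
				    const u8 *table, size_t len)
{
	if (len != IWL_MVM_SAR_TABLE_SIZE)
		return -EINVAL;	/* ACPI gave us the wrong size */

	memcpy(prof->table, table, IWL_MVM_SAR_TABLE_SIZE);
	prof->enabled = true;
	return 0;
}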
@@ -975,7 +998,10 @@ struct iwl_mvm {
#endif

/* Tx queues */
u8 aux_queue;
u16 aux_queue;
u16 probe_queue;
u16 p2p_dev_queue;

u8 first_agg_queue;
u8 last_agg_queue;

@@ -1018,7 +1044,7 @@ struct iwl_mvm {
} peer;
} tdls_cs;

struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
struct iwl_mvm_shared_mem_cfg smem_cfg;

u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
@@ -1035,6 +1061,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode;

struct delayed_work cs_tx_unblock_dwork;
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
#endif
};

/* Extract MVM priv from op_mode and _hw */
@@ -1222,13 +1251,25 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
{
/*
* TODO:
* The issue of how to determine CDB support is still not well defined.
* It may be that it will be for all next HW devices and it may be per
* FW compilation and it may also differ between different devices.
* For now take a ride on the new TX API and get back to it when
* it is well defined.
* The issue of how to determine CDB APIs and usage is still not fully
* defined.
* There is a compilation for CDB and non-CDB FW, but there may
* be also runtime check.
* For now there is a TLV for checking compilation mode, but a
* runtime check will also have to be here - once defined.
*/
return iwl_mvm_has_new_tx_api(mvm);
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
}
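With CDB support now keyed off a firmware TLV rather than piggybacking on the new TX API, callers that program per-LMAC values branch on this predicate. A sketch of the typical call pattern (the helper name is invented; the field shape mirrors the scan hunks later in this series):

/* Sketch: the second LMAC slot is only meaningful on CDB firmware. */
static void sketch_fill_times(struct iwl_mvm *mvm,
			      __le32 *out_of_channel_time,
			      u32 max_out_time)
{
	out_of_channel_time[0] = cpu_to_le32(max_out_time);
	if (iwl_mvm_is_cdb_supported(mvm))	/* second LMAC present */
		out_of_channel_time[1] = cpu_to_le32(max_out_time);
}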
static inline struct agg_tx_status*
iwl_mvm_get_agg_status(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
if (iwl_mvm_has_new_tx_api(mvm))
return &tx_resp->v6.status;
else
return &tx_resp->v3.status;
}
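iwl_mvm_get_agg_status() hides the v3/v6 TX-response layout difference behind one accessor, so callers never touch the version union directly. A minimal usage sketch (the wrapper function is illustrative only):

/* Usage sketch: read the first per-frame status regardless of the
 * firmware's TX response version.
 */
static u16 sketch_first_agg_status(struct iwl_mvm *mvm,
				   struct iwl_mvm_tx_resp *tx_resp)
{
	struct agg_tx_status *agg_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);

	return le16_to_cpu(agg_status[0].status);
}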
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
@@ -1389,6 +1430,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -1668,6 +1711,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout);

/*
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
@@ -1701,7 +1747,8 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
iwl_free_fw_paging(mvm);
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_free_fw_paging(mvm);
mvm->ucode_loaded = false;
iwl_trans_stop_device(mvm->trans);
}
@@ -1797,4 +1844,14 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
u32 duration, u32 timeout);
bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);

#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
#else
static inline
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
return -ENOENT;
}
#endif /* CONFIG_ACPI */

#endif /* __IWL_MVM_H__ */

drivers/net/wireless/intel/iwlwifi/mvm/ops.c

@@ -302,6 +302,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_SYNC),
RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
@@ -426,6 +428,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
};

/* Please keep this array *SORTED* by hex value.
@@ -444,6 +447,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(GEO_TX_POWER_LIMIT),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -452,6 +456,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_PM_NOTIF),
@@ -459,6 +464,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(RX_QUEUES_NOTIFICATION),
};

/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
HCMD_NAME(MFU_ASSERT_DUMP_NTF),
};

/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
@@ -602,6 +614,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
} else {
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
@@ -1256,7 +1270,7 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
u8 tid;

if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
return false;

mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
@@ -1344,7 +1358,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;

if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
return;

rcu_read_lock();
@@ -1414,7 +1428,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
mvm->d0i3_offloading = false;
}

@@ -1427,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
return ret;

/* configure wowlan configuration only if needed */
if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
/* wake on beacons only if beacon storing isn't supported */
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_STORING))
@@ -1504,7 +1518,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)

spin_lock_bh(&mvm->d0i3_tx_lock);

if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
goto out;

IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
@@ -1542,7 +1556,7 @@ out:
}
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
if (wake_queues)
ieee80211_wake_queues(mvm->hw);

drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c

@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -250,12 +251,30 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY;

lockdep_assert_held(&mvm->mutex);

/* In CDB mode we cannot modify PHY context between bands so... */
if (iwl_mvm_has_new_tx_api(mvm) &&
ctxt->channel->band != chandef->chan->band) {
int ret;

/* ... remove it here ...*/
ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE, 0);
if (ret)
return ret;

/* ... and proceed to add it again */
action = FW_CTXT_ACTION_ADD;
}

ctxt->channel = chandef->chan;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
FW_CTXT_ACTION_MODIFY, 0);
action, 0);
}

void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)

drivers/net/wireless/intel/iwlwifi/mvm/rx.c

@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -104,7 +104,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
unsigned int hdrlen, fraglen;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
unsigned int fraglen;

/*
* The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
* but those are all multiples of 4 long) all goes away, but we
* want the *end* of it, which is going to be the start of the IP
* header, to be aligned when it gets pulled in.
* The beginning of the skb->data is aligned on at least a 4-byte
* boundary after allocation. Everything here is aligned at least
* on a 2-byte boundary so we can just take hdrlen & 3 and pad by
* the result.
*/
skb_reserve(skb, hdrlen & 3);

/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@@ -118,8 +131,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/
hdrlen = (len <= skb_tailroom(skb)) ? len :
sizeof(*hdr) + crypt_len + 8;
hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;

memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen;
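The alignment comment above deserves spelling out: 802.11 header lengths are always even, so reserving hdrlen & 3 bytes up front makes hdrlen plus the pad a multiple of 4; once the (4-byte-multiple) SNAP and IV are stripped, the IP header lands on a 4-byte boundary. A standalone sketch of just the arithmetic:

/* Standalone check of the padding math used by skb_reserve() above. */
static unsigned int sketch_ip_align_pad(unsigned int hdrlen)
{
	unsigned int pad = hdrlen & 3;	/* 0 or 2 for an even hdrlen */

	/* (hdrlen + pad) % 4 == 0 holds whenever hdrlen is even */
	return pad;
}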
@@ -339,7 +351,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,

id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;

if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -448,9 +460,16 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);

if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
rate_n_flags, rx_status->band)) {
kfree_skb(skb);
return;
}
rx_status->rate_idx = rate;
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -637,6 +656,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm,
};
int expected_size;
int i;
u8 *energy;
__le32 *bytes, *air_time;

if (iwl_mvm_is_cdb_supported(mvm))
expected_size = sizeof(*stats);
@@ -645,8 +667,11 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);

if (iwl_rx_packet_payload_len(pkt) != expected_size)
goto invalid;
if (iwl_rx_packet_payload_len(pkt) != expected_size) {
IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));
return;
}

data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@@ -662,38 +687,6 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.common.on_time_scan);

data.general = &stats->general;
if (iwl_mvm_has_new_rx_api(mvm)) {
int i;
u8 *energy;
__le32 *bytes, *air_time;

if (!iwl_mvm_is_cdb_supported(mvm)) {
struct iwl_notif_statistics_v11 *v11 =
(void *)&pkt->data;

energy = (void *)&v11->load_stats.avg_energy;
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
air_time = (void *)&stats->load_stats.air_time;
}

rcu_read_lock();
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
struct iwl_mvm_sta *sta;

if (!energy[i])
continue;

sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
sta->avg_energy = energy[i];
}
rcu_read_unlock();
}

iwl_mvm_rx_stats_check_trigger(mvm, pkt);

@@ -701,10 +694,36 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
return;
invalid:
IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
iwl_rx_packet_payload_len(pkt));

if (!iwl_mvm_has_new_rx_api(mvm))
return;

if (!iwl_mvm_is_cdb_supported(mvm)) {
struct iwl_notif_statistics_v11 *v11 =
(void *)&pkt->data;

energy = (void *)&v11->load_stats.avg_energy;
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
air_time = (void *)&stats->load_stats.air_time;
}

rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
struct iwl_mvm_sta *sta;

if (!energy[i])
continue;

sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
if (!sta)
continue;
sta->avg_energy = energy[i];
}
rcu_read_unlock();
}

void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
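The rewritten statistics handler above first sizes the payload by firmware generation, rejects mismatches, and only then walks the per-station average-energy array. A condensed sketch of just the size dispatch; the CDB struct name is an assumption (the hunk only shows it as sizeof(*stats)):

/* Condensed view of the size dispatch above; the CDB struct name is
 * assumed for illustration.
 */
static size_t sketch_stats_expected_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_is_cdb_supported(mvm))
		return sizeof(struct iwl_notif_statistics_cdb); /* assumed */
	else if (iwl_mvm_has_new_rx_api(mvm))
		return sizeof(struct iwl_notif_statistics_v11);
	else
		return sizeof(struct iwl_notif_statistics_v10);
}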
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c

@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -462,6 +462,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
int i;
u16 sn = 0, index = 0;
bool expired = false;
bool cont = false;

spin_lock(&buf->lock);

@@ -473,12 +474,21 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;

if (skb_queue_empty(&buf->entries[index]))
if (skb_queue_empty(&buf->entries[index])) {
/*
* If there is a hole and the next frame didn't expire
* we want to break and not advance SN
*/
cont = false;
continue;
if (!time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
}
if (!cont && !time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
break;

expired = true;
/* continue until next hole after this expired frames */
cont = true;
sn = ieee80211_sn_add(buf->head_sn, i + 1);
}
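The new "cont" flag changes the scan semantics: once a frame has expired, every consecutive buffered frame behind it is released too, and the walk only stops at a frame that has not expired and does not sit directly behind an expired run. A toy model of that control flow, detached from the driver types:

/* Toy model of the loop above: held[i] marks occupied slots,
 * expired_at[i] whether that frame's timer ran out.  Returns the
 * last index that may be released, or -1.
 */
static int sketch_last_expired(const bool *held, const bool *expired_at,
			       int buf_size)
{
	bool cont = false;
	int i, last = -1;

	for (i = 0; i < buf_size; i++) {
		if (!held[i]) {		/* hole: stop extending the run */
			cont = false;
			continue;
		}
		if (!cont && !expired_at[i])
			break;		/* fresh frame, not behind a run */
		cont = true;		/* release and keep walking */
		last = i;
	}
	return last;
}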
@@ -626,9 +636,13 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;

baid_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN(!baid_data,
"Received baid %d, but no data exists for this BAID\n", baid))
if (!baid_data) {
WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
"Received baid %d, but no data exists for this BAID\n",
baid);
return false;
}

if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
"baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
@@ -643,6 +657,14 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,

spin_lock_bh(&buffer->lock);

if (!buffer->valid) {
if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
spin_unlock_bh(&buffer->lock);
return false;
}
buffer->valid = true;
}

if (ieee80211_is_back_req(hdr->frame_control)) {
iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
goto drop;
@@ -727,7 +749,8 @@ drop:
return true;
}

static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
u32 reorder_data, u8 baid)
{
unsigned long now = jiffies;
unsigned long timeout;
@@ -736,8 +759,10 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
rcu_read_lock();

data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON(!data))
if (!data) {
WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
goto out;
}

if (!data->timeout)
goto out;
@@ -831,7 +856,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;

if (!WARN_ON_ONCE(id >= IWL_MVM_STATION_COUNT)) {
if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -893,26 +918,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,

if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
rcu_read_unlock();
return;
goto out;
}

/*
* Our hardware de-aggregates AMSDUs but copies the mac header
* as it to the de-aggregated MPDUs. We need to turn off the
* AMSDU bit in the QoS control ourselves.
* In addition, HW reverses addr3 and addr4 - reverse it back.
*/
if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
int i;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 mac_addr[ETH_ALEN];

*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
if (!(desc->amsdu_info &
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;

for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = hdr->addr3[ETH_ALEN - i - 1];
ether_addr_copy(hdr->addr3, mac_addr);

if (ieee80211_has_a4(hdr->frame_control)) {
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] =
hdr->addr4[ETH_ALEN - i - 1];
ether_addr_copy(hdr->addr4, mac_addr);
}
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
u32 reorder_data = le32_to_cpu(desc->reorder_data);

iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid);
}

/* Set up the HT phy flags */
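The AMSDU hunk above works around hardware that copies the MAC header to each de-aggregated MPDU with addr3/addr4 byte-reversed; the driver reverses them back in place. The same operation, isolated into a self-contained helper for clarity (name is illustrative):

/* Minimal in-place byte reversal, equivalent to the addr3/addr4
 * fix-up above (ETH_ALEN == 6).
 */
static void sketch_reverse_addr(u8 *addr)
{
	u8 tmp[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		tmp[i] = addr[ETH_ALEN - i - 1];
	memcpy(addr, tmp, ETH_ALEN);
}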
@@ -953,9 +991,17 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);

if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
rate_n_flags, rx_status->band)) {
kfree_skb(skb);
goto out;
}
rx_status->rate_idx = rate;

}

/* management stuff on default queue */
@@ -974,6 +1020,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
out:
rcu_read_unlock();
}

drivers/net/wireless/intel/iwlwifi/mvm/scan.c

@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -966,11 +966,11 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
channels[j] = band->channels[i].hw_value;
}

static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config *cfg = config;
struct iwl_scan_config_v1 *cfg = config;

cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -989,11 +989,11 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
iwl_mvm_fill_channels(mvm, cfg->channel_array);
}

static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
struct iwl_scan_config_cdb *cfg = config;
struct iwl_scan_config *cfg = config;

cfg->flags = cpu_to_le32(flags);
cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1001,10 +1001,14 @@ static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
cfg->out_of_channel_time[0] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->out_of_channel_time[1] =
cpu_to_le32(scan_timing[type].max_out_time);
cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);

if (iwl_mvm_is_cdb_supported(mvm)) {
cfg->suspend_time[1] =
cpu_to_le32(scan_timing[type].suspend_time);
cfg->out_of_channel_time[1] =
cpu_to_le32(scan_timing[type].max_out_time);
}

iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);

@@ -1033,16 +1037,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;

if (type == mvm->scan_type) {
IWL_DEBUG_SCAN(mvm,
"Ignoring UMAC scan config of the same type\n");
if (type == mvm->scan_type)
return 0;
}

if (iwl_mvm_is_cdb_supported(mvm))
cmd_size = sizeof(struct iwl_scan_config_cdb);
else
if (iwl_mvm_has_new_tx_api(mvm))
cmd_size = sizeof(struct iwl_scan_config);
else
cmd_size = sizeof(struct iwl_scan_config_v1);
cmd_size += mvm->fw->ucode_capa.n_scan_channels;

cfg = kzalloc(cmd_size, GFP_KERNEL);
@@ -1068,13 +1069,13 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

if (iwl_mvm_is_cdb_supported(mvm)) {
if (iwl_mvm_has_new_tx_api(mvm)) {
flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
} else {
iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
}

cmd.data[0] = cfg;
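The scan-config hunks above re-key the command layout on the new TX API (a000) rather than on CDB, with CDB only deciding whether the second LMAC's timing slots get filled. The sizing rule in isolation, following the hunk exactly:

/* The sizing rule from the hunk above: the larger iwl_scan_config is
 * used whenever the new TX API is present; both layouts end with one
 * byte per scan channel.
 */
static size_t sketch_scan_config_size(struct iwl_mvm *mvm)
{
	size_t base = iwl_mvm_has_new_tx_api(mvm) ?
		      sizeof(struct iwl_scan_config) :
		      sizeof(struct iwl_scan_config_v1);

	return base + mvm->fw->ucode_capa.n_scan_channels;
}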
@@ -1119,16 +1120,20 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
}
cmd->fragmented_dwell = timing->dwell_fragmented;

if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->v6.max_out_time[1] =
cpu_to_le32(timing->max_out_time);
cmd->v6.suspend_time[1] =
cpu_to_le32(timing->suspend_time);
}
} else {
cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
cmd->no_cdb.scan_priority =
cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time);
cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time);
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}

@@ -1207,8 +1212,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
(void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
(void *)&cmd->v6.data : (void *)&cmd->v1.data;
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1245,12 +1250,12 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;

if (iwl_mvm_is_cdb_supported(mvm)) {
cmd->cdb.channel_flags = channel_flags;
cmd->cdb.n_channels = params->n_channels;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
cmd->no_cdb.channel_flags = channel_flags;
cmd->no_cdb.n_channels = params->n_channels;
cmd->v1.channel_flags = channel_flags;
cmd->v1.n_channels = params->n_channels;
}

iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
@@ -1692,10 +1697,10 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)

int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE;
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;

if (iwl_mvm_is_cdb_supported(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
if (iwl_mvm_has_new_tx_api(mvm))
base_size = IWL_SCAN_REQ_UMAC_SIZE;

if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +

drivers/net/wireless/intel/iwlwifi/mvm/sf.c

@@ -235,7 +235,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
if (sta_id == IWL_MVM_STATION_COUNT) {
if (sta_id == IWL_MVM_INVALID_STA) {
IWL_ERR(mvm,
"No station: Cannot switch SF to FULL_ON\n");
return -EINVAL;
@@ -276,12 +276,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
bool remove_vif)
{
enum iwl_sf_state new_state;
u8 sta_id = IWL_MVM_STATION_COUNT;
u8 sta_id = IWL_MVM_INVALID_STA;
struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_active_iface_iterator_data data = {
.ignore_vif = changed_vif,
.sta_vif_state = SF_UNINIT,
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
.sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
};

/*

drivers/net/wireless/intel/iwlwifi/mvm/sta.c

@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -98,7 +98,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
reserved_ids = BIT(0);

/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;

@@ -106,7 +106,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
lockdep_is_held(&mvm->mutex)))
return sta_id;
}
return IWL_MVM_STATION_COUNT;
return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
@@ -127,11 +127,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 agg_size = 0, mpdu_dens = 0;

if (!update || (flags & STA_MODIFY_QUEUES)) {
add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

if (flags & STA_MODIFY_QUEUES)
add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
if (!iwl_mvm_has_new_tx_api(mvm)) {
add_sta_cmd.tfd_queue_msk =
cpu_to_le32(mvm_sta->tfd_queue_msk);

if (flags & STA_MODIFY_QUEUES)
add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
} else {
WARN_ON(flags & STA_MODIFY_QUEUES);
}
}

switch (sta->bandwidth) {
@@ -209,13 +215,15 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BK);
add_sta_cmd.uapsd_acs |= BIT(AC_BK);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_BE);
add_sta_cmd.uapsd_acs |= BIT(AC_BE);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VI);
add_sta_cmd.uapsd_acs |= BIT(AC_VI);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
add_sta_cmd.uapsd_trigger_acs |= BIT(AC_VO);
add_sta_cmd.uapsd_acs |= BIT(AC_VO);
add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
}

status = ADD_STA_SUCCESS;
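The uapsd_acs change above packs the trigger-enabled ACs into the low nibble and then duplicates them into the high nibble, which (assuming the firmware field treats bits 4-7 as the delivery-enabled ACs, mirroring the trigger set) configures both directions in one byte. The bit trick in isolation:

/* The nibble duplication above, spelled out: given a low-nibble mask
 * of trigger-enabled ACs, mirror it into bits 4-7.
 */
static u8 sketch_pack_uapsd_acs(u8 trigger_acs)
{
	return trigger_acs | (trigger_acs << 4);
}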
@@ -337,6 +345,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
u8 sta_id;
int ret;

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -387,6 +398,9 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)

lockdep_assert_held(&mvm->mutex);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -426,6 +440,9 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)

lockdep_assert_held(&mvm->mutex);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -468,6 +485,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,

lockdep_assert_held(&mvm->mutex);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -512,6 +532,8 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
int i;

lockdep_assert_held(&mvm->queue_info_lock);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

@@ -596,6 +618,9 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
unsigned long mq;
int ret;

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

/*
* If the AC is lower than current one - FIFO needs to be redirected to
* the lowest one of the streams in the queue. Check if this is needed
@@ -677,6 +702,41 @@ out:
return ret;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac,
int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;

lockdep_assert_held(&mvm->mutex);

IWL_DEBUG_TX_QUEUES(mvm,
"Allocating queue for sta %d on tid %d\n",
mvmsta->sta_id, tid);
queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
wdg_timeout);
if (queue < 0)
return queue;

IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
spin_unlock_bh(&mvmsta->lock);

spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);

return 0;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -702,6 +762,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,

lockdep_assert_held(&mvm->mutex);

if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

spin_lock_bh(&mvmsta->lock);
tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock);
@@ -880,6 +943,9 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)

lockdep_assert_held(&mvm->mutex);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;

spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -917,6 +983,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
int ssn;
int ret = true;

/* queue sharing is disabled on new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;

lockdep_assert_held(&mvm->mutex);

spin_lock_bh(&mvm->queue_info_lock);
@@ -1199,18 +1269,30 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
ac = tid_to_mac80211_ac[i];
mac_queue = mvm_sta->vif->hw_queue[ac];

cfg.tid = i;
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
if (iwl_mvm_has_new_tx_api(mvm)) {
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d\n",
mvm_sta->sta_id, i);
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
mvm_sta->sta_id,
i, wdg_timeout);
tid_data->txq_id = txq_id;
} else {
u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
cfg.tid = i;
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id ==
IWL_MVM_DQA_BSS_CLIENT_QUEUE);

iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
IEEE80211_SEQ_TO_SN(tid_data->seq_number),
&cfg, wdg_timeout);
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);

iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
wdg_timeout);
}

mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
@@ -1235,7 +1317,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
else
sta_id = mvm_sta->sta_id;

if (sta_id == IWL_MVM_STATION_COUNT)
if (sta_id == IWL_MVM_INVALID_STA)
return -ENOSPC;

spin_lock_init(&mvm_sta->lock);
@@ -1317,10 +1399,10 @@ update_fw:

if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
mvmvif->ap_sta_id = sta_id;
} else {
WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
}
}

@@ -1571,11 +1653,11 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;

/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

/* clear d0i3_ap_sta_id if no longer relevant */
if (mvm->d0i3_ap_sta_id == sta_id)
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
}
}

@@ -1584,7 +1666,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}

@@ -1641,7 +1723,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
return -ENOSPC;
}

@@ -1652,12 +1734,11 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
return 0;
}

static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta)
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
sta->sta_id = IWL_MVM_STATION_COUNT;
sta->sta_id = IWL_MVM_INVALID_STA;
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
@@ -1676,7 +1757,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));

cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
if (!iwl_mvm_has_new_tx_api(mvm))
cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(0xffff);

if (addr)
@@ -1701,27 +1783,19 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
return ret;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
{
unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
mvm->cfg->base_params->wd_timeout :
IWL_WATCHDOG_DISABLED;
int ret;

lockdep_assert_held(&mvm->mutex);

/* Map Aux queue to fifo - needs to happen before adding Aux station */
if (!iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;

if (iwl_mvm_is_dqa_supported(mvm)) {
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
mvm->aux_sta.sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
mvm->aux_queue = queue;
} else if (iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = mvm->aux_sta.sta_id,
@@ -1732,14 +1806,43 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)

iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
wdg_timeout);
} else {
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
int ret;

lockdep_assert_held(&mvm->mutex);

/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;

/* Map Aux queue to fifo - needs to happen before adding Aux station */
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_enable_aux_queue(mvm);

ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
MAC_INDEX_AUX, 0);

if (ret)
if (ret) {
iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
return ret;
return ret;
}

/*
* For a000 firmware and on we cannot add queue to a station unknown
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_enable_aux_queue(mvm);

return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
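The aux-station rework above exists purely for ordering: pre-a000 firmware wants the queue mapped before the station is added, while a000 firmware rejects SCD_QUEUE_CFG for a station it does not yet know. The resulting control flow, condensed (this mirrors the hunk; it is a reading aid, not a replacement):

/* Ordering sketch for the aux-station path above. */
static int sketch_add_aux(struct iwl_mvm *mvm)
{
	int ret;

	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);	/* old FW: queue first */

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret)
		return ret;

	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);	/* a000: station first */
	return 0;
}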
@@ -1790,39 +1893,39 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
|
||||
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
|
||||
const u8 *baddr = _baddr;
|
||||
int queue = 0;
|
||||
int ret;
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.fifo = IWL_MVM_TX_FIFO_VO,
|
||||
.sta_id = mvmvif->bcast_sta.sta_id,
|
||||
.tid = IWL_MAX_TID_COUNT,
|
||||
.aggregate = false,
|
||||
.frame_limit = IWL_FRAME_LIMIT,
|
||||
};
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (iwl_mvm_is_dqa_supported(mvm)) {
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.fifo = IWL_MVM_TX_FIFO_VO,
|
||||
.sta_id = mvmvif->bcast_sta.sta_id,
|
||||
.tid = IWL_MAX_TID_COUNT,
|
||||
.aggregate = false,
|
||||
.frame_limit = IWL_FRAME_LIMIT,
|
||||
};
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
|
||||
int queue;
|
||||
|
||||
if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
|
||||
if (vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)
|
||||
queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
||||
queue = mvm->probe_queue;
|
||||
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
|
||||
queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
||||
queue = mvm->p2p_dev_queue;
|
||||
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
|
||||
return -EINVAL;
|
||||
|
||||
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
|
||||
wdg_timeout);
|
||||
bsta->tfd_queue_msk |= BIT(queue);
|
||||
|
||||
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
|
||||
&cfg, wdg_timeout);
|
||||
}
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_ADHOC)
|
||||
baddr = vif->bss_conf.bssid;
|
||||
|
||||
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
|
||||
if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
|
||||
return -ENOSPC;
|
||||
|
||||
ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
|
||||
@@ -1831,27 +1934,20 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* In AP vif type, we also need to enable the cab_queue. However, we
|
||||
* have to enable it after the ADD_STA command is sent, otherwise the
|
||||
* FW will throw an assert once we send the ADD_STA command (it'll
|
||||
* detect a mismatch in the tfd_queue_msk, as we can't add the
|
||||
* enabled-cab_queue to the mask)
|
||||
* For a000 firmware and on we cannot add queue to a station unknown
|
||||
* to firmware so enable queue here - after the station was added
|
||||
*/
|
||||
if (iwl_mvm_is_dqa_supported(mvm) &&
|
||||
(vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)) {
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.fifo = IWL_MVM_TX_FIFO_MCAST,
|
||||
.sta_id = mvmvif->bcast_sta.sta_id,
|
||||
.tid = IWL_MAX_TID_COUNT,
|
||||
.aggregate = false,
|
||||
.frame_limit = IWL_FRAME_LIMIT,
|
||||
};
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
|
||||
bsta->sta_id,
|
||||
IWL_MAX_TID_COUNT,
|
||||
wdg_timeout);
|
||||
if (vif->type == NL80211_IFTYPE_AP)
|
||||
mvm->probe_queue = queue;
|
||||
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
|
||||
mvm->p2p_dev_queue = queue;
|
||||
|
||||
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
|
||||
0, &cfg, wdg_timeout);
|
||||
bsta->tfd_queue_msk |= BIT(queue);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -1869,24 +1965,18 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);

if (mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->probe_queue)) {
iwl_mvm_disable_txq(mvm, mvm->probe_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
mvmvif->bcast_sta.tfd_queue_msk &=
~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->probe_queue);
}

if (mvmvif->bcast_sta.tfd_queue_msk &
BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
iwl_mvm_disable_txq(mvm,
IWL_MVM_DQA_P2P_DEVICE_QUEUE,
if (mvmvif->bcast_sta.tfd_queue_msk & BIT(mvm->p2p_dev_queue)) {
iwl_mvm_disable_txq(mvm, mvm->p2p_dev_queue,
vif->hw_queue[0], IWL_MAX_TID_COUNT,
0);
mvmvif->bcast_sta.tfd_queue_msk &=
~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(mvm->p2p_dev_queue);
}
}

@@ -1982,6 +2072,88 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}

/*
* Allocate a new station entry for the multicast station to the given vif,
* and send it to the FW.
* Note that each AP/GO mac should have its own multicast station.
*
* @mvm: the mvm component
* @vif: the interface to which the multicast station is added
*/
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 *maddr = _maddr;
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = IWL_MVM_TX_FIFO_MCAST,
.sta_id = msta->sta_id,
.tid = IWL_MAX_TID_COUNT,
.aggregate = false,
.frame_limit = IWL_FRAME_LIMIT,
};
unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
int ret;

lockdep_assert_held(&mvm->mutex);

if (!iwl_mvm_is_dqa_supported(mvm))
return 0;

if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
return -ENOTSUPP;

ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
if (ret) {
iwl_mvm_dealloc_int_sta(mvm, msta);
return ret;
}

/*
* Enable cab queue after the ADD_STA command is sent.
* This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
* command with unknown station id.
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
msta->sta_id,
IWL_MAX_TID_COUNT,
timeout);
vif->cab_queue = queue;
} else {
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
}

return 0;
}
/*
* Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
*/
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;

lockdep_assert_held(&mvm->mutex);

if (!iwl_mvm_is_dqa_supported(mvm))
return 0;

iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);

ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");

return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
@@ -2059,6 +2231,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->sta_id = sta_id;
reorder_buf->valid = false;
for (j = 0; j < reorder_buf->buf_size; j++)
__skb_queue_head_init(&reorder_buf->entries[j]);
}
@@ -2226,7 +2399,9 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
if (!iwl_mvm_has_new_tx_api(mvm))
cmd.modify_mask = STA_MODIFY_QUEUES;
cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

@@ -2426,6 +2601,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* changed from current (become smaller)
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
/*
* On new TX API rs and BA manager are offloaded.
* For now though, just don't support being reconfigured
*/
if (iwl_mvm_has_new_tx_api(mvm))
return -ENOTSUPP;

/*
* If reconfiguring an existing queue, it first must be
* drained
@@ -2675,7 +2857,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
* station ID, then use AP's station ID.
*/
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;

sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
@@ -2697,68 +2879,97 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta,
struct ieee80211_key_conf *keyconf, bool mcast,
struct ieee80211_key_conf *key, bool mcast,
u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
u8 key_offset)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
union {
struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
struct iwl_mvm_add_sta_key_cmd cmd;
} u = {};
__le16 key_flags;
int ret;
u32 status;
u16 keyidx;
int i;
u8 sta_id = mvm_sta->sta_id;
u64 pn = 0;
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

switch (keyconf->cipher) {
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
cmd.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
if (new_api) {
memcpy((void *)&u.cmd.tx_mic_key,
&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
IWL_MIC_KEY_SIZE);

memcpy((void *)&u.cmd.rx_mic_key,
&key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
IWL_MIC_KEY_SIZE);
pn = atomic64_read(&key->tx_pn);

} else {
u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
for (i = 0; i < 5; i++)
u.cmd_v1.tkip_rx_ttak[i] =
cpu_to_le16(tkip_p1k[i]);
}
memcpy(u.cmd.common.key, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key, key->key, key->keylen);
if (new_api)
pn = atomic64_read(&key->tx_pn);
break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key + 3, key->key, key->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP_256:
key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
/* fall through */
case WLAN_CIPHER_SUITE_GCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key, key->key, key->keylen);
if (new_api)
pn = atomic64_read(&key->tx_pn);
break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
memcpy(u.cmd.common.key, key->key, key->keylen);
}

if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

cmd.key_offset = key_offset;
cmd.key_flags = key_flags;
cmd.sta_id = sta_id;
u.cmd.common.key_offset = key_offset;
u.cmd.common.key_flags = key_flags;
u.cmd.common.sta_id = mvm_sta->sta_id;

if (new_api) {
u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
size = sizeof(u.cmd);
} else {
size = sizeof(u.cmd_v1);
}

status = ADD_STA_SUCCESS;
if (cmd_flags & CMD_ASYNC)
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
sizeof(cmd), &cmd);
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
&u.cmd);
else
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
&cmd, &status);
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
&u.cmd, &status);

switch (status) {
case ADD_STA_SUCCESS:
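/*
 * Editor's sketch of the versioned-command pattern used above, with
 * simplified hypothetical layouts (the real structs live in the
 * firmware API headers). Because the v1 command is a prefix of the new
 * one, a single union-backed buffer serves both firmware generations
 * and only the length handed to the command API changes.
 */
#include <stddef.h>

struct key_cmd_common { unsigned char sta_id, key_flags, key[32]; };
struct key_cmd_v1 { struct key_cmd_common common; };		/* old firmware */
struct key_cmd { struct key_cmd_common common; unsigned long long pn; };	/* new firmware */

static size_t key_cmd_size(int new_api)
{
	union {
		struct key_cmd_v1 cmd_v1;
		struct key_cmd cmd;
	} u = { .cmd.common.sta_id = 7 };	/* writes land in both views */

	(void)u;
	/* the same buffer is sent either way; only the size differs */
	return new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
}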
@@ -2858,7 +3069,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return sta->addr;

if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
u8 sta_id = mvmvif->ap_sta_id;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
@@ -2911,9 +3122,14 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_key_conf *keyconf,
bool mcast)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
union {
struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
struct iwl_mvm_add_sta_key_cmd cmd;
} u = {};
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
__le16 key_flags;
int ret;
int ret, size;
u32 status;

key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
@@ -2924,13 +3140,19 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

cmd.key_flags = key_flags;
cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
/*
* The fields assigned here are in the same location at the start
* of the command, so we can do this union trick.
*/
u.cmd.common.key_flags = key_flags;
u.cmd.common.key_offset = keyconf->hw_key_idx;
u.cmd.common.sta_id = sta_id;

size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
&cmd, &status);
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
&status);

switch (status) {
case ADD_STA_SUCCESS:
@@ -3044,7 +3266,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
u8 sta_id = IWL_MVM_STATION_COUNT;
u8 sta_id = IWL_MVM_INVALID_STA;
int ret, i;

lockdep_assert_held(&mvm->mutex);
@@ -3301,7 +3523,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);

/* Block/unblock all the stations of the given mvmvif */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -532,10 +532,13 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);

@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -78,7 +80,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)

lockdep_assert_held(&mvm->mutex);

for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -101,7 +103,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

lockdep_assert_held(&mvm->mutex);

for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -145,7 +147,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

/* populate TDLS peer data */
cnt = 0;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@@ -251,7 +253,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);

if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}

void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
@@ -305,7 +307,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,

/* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
@@ -523,7 +525,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

/* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
goto out;

sta = rcu_dereference_protected(
@@ -573,7 +575,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
sta->addr, chandef->chan->center_freq, chandef->width);

/* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
@@ -633,7 +635,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

/* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out;
}
@@ -654,7 +656,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true;

mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL;

@@ -697,7 +699,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *cur_sta;

/* make sure it's the same peer */
@@ -80,7 +80,7 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
if (IWL_MVM_TOF_IS_RESPONDER) {
tof_data->responder_cfg.sub_grp_cmd_id =
cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
}
#endif

@@ -356,7 +356,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta;
int i, err;

for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;
@@ -475,6 +475,39 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,

memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = TX_CMD;

if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);

/* padding is inserted later in transport */
/* FIXME - check for AMSDU may need to be removed */
if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);

cmd->offload_assist |= cpu_to_le16(offload_assist);

/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);

/* Copy MAC header from skb into command buffer */
memcpy(cmd->hdr, hdr, hdrlen);

if (!info->control.hw_key)
cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);

/* For data packets rate info comes from the fw */
if (ieee80211_is_data(hdr->frame_control) && sta)
goto out;

cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
cmd->rate_n_flags =
cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));

goto out;
}

tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

if (info->control.hw_key)
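/*
 * Editor's note, a worked example for the padding test above: a QoS
 * data header is 26 bytes, and 26 % 4 == 2, so the PAD bit is set; a
 * non-QoS data header is 24 bytes, 24 % 4 == 0, so it stays clear. The
 * transport inserts the actual pad bytes later - the bit only announces
 * that the payload starts after a DW-aligned header.
 */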
@@ -484,6 +517,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,

iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
return dev_cmd;
}

@@ -514,21 +551,21 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
*/
if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
ieee80211_is_deauth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
return mvm->probe_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;

WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
"fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
return mvm->probe_queue;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
return mvm->p2p_dev_queue;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;

WARN_ON_ONCE(1);
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
return mvm->p2p_dev_queue;
default:
WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
return -1;
@@ -541,7 +578,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
@@ -598,7 +634,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);

if (ap_sta_id != IWL_MVM_STATION_COUNT)
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
} else if (iwl_mvm_is_dqa_supported(mvm) &&
info.control.vif->type == NL80211_IFTYPE_STATION &&
@@ -616,11 +652,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);

tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);

if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
@@ -713,7 +744,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* fifo to be able to send bursts.
*/
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);

if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@ -862,6 +893,9 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
unsigned long now = jiffies;
int tid;

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return false;

for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
@@ -881,7 +915,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
@@ -896,7 +929,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;

if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;

dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
@@ -904,8 +937,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!dev_cmd)
goto drop;

tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

/*
* we handle that entirely ourselves -- for uAPSD the firmware
* will always send a notification, and for PS-Poll responses
@@ -926,18 +957,27 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
goto drop_unlock_sta;

seq_number = mvmsta->tid_data[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
if (WARN_ON_ONCE(is_ampdu &&
mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta;

seq_number = mvmsta->tid_data[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;

if (!iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
/* update the tx_cmd hdr as it was already copied */
tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
}
}

if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
txq_id = mvmsta->tid_data[tid].txq_id;

if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -945,9 +985,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}

/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);

WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

/* Check if TXQ needs to be allocated or re-activated */
@@ -1036,7 +1073,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(!mvmsta))
return -1;

if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;

memcpy(&info, skb->cb, sizeof(info));
@@ -1245,6 +1282,26 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
}
}

/**
* iwl_mvm_get_scd_ssn - returns the SSN of the SCD
* @tx_resp: the Tx response from the fw (agg or non-agg)
*
* When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
* it can't know that everything will go well until the end of the AMPDU, it
* can't know in advance the number of MPDUs that will be sent in the current
* batch. This is why it writes the agg Tx response while it fetches the MPDUs.
* Hence, it can't know in advance what the SSN of the SCD will be at the end
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
tx_resp->frame_count) & 0xfff;
}

static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
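/*
 * Editor's sketch of the variable-offset read performed by
 * iwl_mvm_get_scd_ssn() above, using simplified stand-in types: the
 * firmware appends one 4-byte status entry per transmitted frame, and
 * the scheduler SSN sits in the 32-bit word right after them, masked
 * to 12 bits.
 */
struct agg_status_entry { unsigned short status, sequence; };	/* 4 bytes */

static unsigned int read_ssn_after_frames(const struct agg_status_entry *agg,
					  unsigned char frame_count)
{
	/* step over frame_count entries, then mask the 12-bit SSN */
	const unsigned int *ssn_p = (const unsigned int *)(agg + frame_count);

	return *ssn_p & 0xfff;
}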
@@ -1254,8 +1311,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u32 status = le16_to_cpu(tx_resp->status.status);
u16 ssn = iwl_mvm_get_scd_ssn(tx_resp);
struct agg_tx_status *agg_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
u32 status = le16_to_cpu(agg_status->status);
u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs;
u8 skb_freed = 0;
@@ -1264,6 +1323,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,

__skb_queue_head_init(&skbs);

if (iwl_mvm_has_new_tx_api(mvm))
txq_id = le16_to_cpu(tx_resp->v6.tx_queue);

seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

/* we can free until ssn % q.n_bd not inclusive */
@@ -1388,7 +1450,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);

if (tid != IWL_TID_NON_QOS) {
if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
bool send_eosp_ndp = false;
@@ -1520,7 +1582,8 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
struct agg_tx_status *frame_status = &tx_resp->status;
struct agg_tx_status *frame_status =
iwl_mvm_get_agg_status(mvm, tx_resp);
int i;

for (i = 0; i < tx_resp->frame_count; i++) {
@@ -1722,6 +1785,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;

if (!le16_to_cpu(ba_res->tfd_cnt))
goto out;

/*
* TODO:
* When supporting multi TID aggregations - we need to move
@@ -1730,12 +1796,16 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* This will go together with SN and AddBA offload and cannot
* be handled properly for now.
*/
WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
(int)ba_res->tfd[0].q_num,
WARN_ON(le16_to_cpu(ba_res->ra_tid_cnt) != 1);
tid = ba_res->ra_tid[0].tid;
if (tid == IWL_MGMT_TID)
tid = IWL_MAX_TID_COUNT;
iwl_mvm_tx_reclaim(mvm, sta_id, tid,
(int)(le16_to_cpu(ba_res->tfd[0].q_num)),
le16_to_cpu(ba_res->tfd[0].tfd_index),
&ba_info, le32_to_cpu(ba_res->tx_rate));

out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -597,6 +598,9 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;

if (iwl_mvm_has_new_tx_api(mvm))
return -ENOSPC;

/*
* If no free queue found - settle for an inactive one to reconfigure
* Make sure that the inactive queue either already belongs to this STA,
@@ -627,6 +631,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
};
int ret;

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
@@ -644,20 +651,19 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}

void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
int mac80211_queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;

spin_lock_bh(&mvm->queue_info_lock);

/* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
queue, cfg->tid);
return;
queue, tid);
return false;
}

/* Update mappings and refcounts */
@@ -666,17 +672,17 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount++;
mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
mvm->queue_info[queue].tid_bitmap |= BIT(tid);
mvm->queue_info[queue].ra_sta_id = sta_id;

if (enable_queue) {
if (cfg->tid != IWL_MAX_TID_COUNT)
if (tid != IWL_MAX_TID_COUNT)
mvm->queue_info[queue].mac80211_ac =
tid_to_mac80211_ac[cfg->tid];
tid_to_mac80211_ac[tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

mvm->queue_info[queue].txq_tid = cfg->tid;
mvm->queue_info[queue].txq_tid = tid;
}

IWL_DEBUG_TX_QUEUES(mvm,
@@ -686,8 +692,49 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

spin_unlock_bh(&mvm->queue_info_lock);

return enable_queue;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout)
{
struct iwl_tx_queue_cfg_cmd cmd = {
.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
.sta_id = sta_id,
.tid = tid,
};
int queue;

if (cmd.tid == IWL_MAX_TID_COUNT)
cmd.tid = IWL_MGMT_TID;
queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
SCD_QUEUE_CFG, timeout);

if (queue < 0) {
IWL_DEBUG_TX_QUEUES(mvm,
"Failed allocating TXQ for sta %d tid %d, ret: %d\n",
sta_id, tid, queue);
return queue;
}

IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);

iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, sta_id, tid);

return queue;
}

void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;

/* Send the enabling command if we need to */
if (enable_queue) {
if (iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
cfg->sta_id, cfg->tid)) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
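/*
 * Editor's note on the split above: with TVQM (a000) firmware the queue
 * id is chosen by the firmware, so callers consume the return value of
 * iwl_mvm_tvqm_enable_txq() instead of passing a queue number in - a
 * hedged usage sketch mirroring the callers added earlier in this
 * series:
 *
 *	int queue = iwl_mvm_tvqm_enable_txq(mvm, mac80211_queue,
 *					    sta_id, tid, wdg_timeout);
 *	if (queue < 0)
 *		return queue;		/- firmware refused the allocation -/
 *	mvm->probe_queue = queue;	/- keep the firmware-chosen id -/
 *
 * Pre-a000 callers keep using iwl_mvm_enable_txq() with a fixed queue
 * number; only that path sends the SCD_QUEUE_CFG host command below.
 */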
@@ -701,7 +748,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd),
"Failed to configure queue %d on FIFO %d\n", queue,
cfg->fifo);
@@ -716,7 +764,6 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = true;
int ret;

spin_lock_bh(&mvm->queue_info_lock);

@@ -787,14 +834,23 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,

spin_unlock_bh(&mvm->queue_info_lock);

iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
if (iwl_mvm_has_new_tx_api(mvm)) {
iwl_trans_txq_free(mvm->trans, queue);
} else {
int ret;

return ret;
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(struct iwl_scd_txq_cfg_cmd),
&cmd);

if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
return ret;
}

return 0;
}

/**
@@ -816,7 +872,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
.data = { lq, },
};

if (WARN_ON(lq->sta_id == IWL_MVM_STATION_COUNT))
if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;

return iwl_mvm_send_cmd(mvm, &cmd);
@@ -1088,6 +1144,9 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;

/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
|
||||
unsigned long now = jiffies;
|
||||
int i;
|
||||
|
||||
if (iwl_mvm_has_new_tx_api(mvm))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&mvm->queue_info_lock);
|
||||
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
|
||||
if (mvm->queue_info[i].hw_queue_refcount > 0)
|
||||
|
277
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
Normal file
277
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
Normal file
@@ -0,0 +1,277 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include "iwl-trans.h"
|
||||
#include "iwl-fh.h"
|
||||
#include "iwl-context-info.h"
|
||||
#include "internal.h"
|
||||
#include "iwl-prph.h"
|
||||
|
||||
static int iwl_pcie_get_num_sections(const struct fw_img *fw,
|
||||
int start)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
while (start < fw->num_sec &&
|
||||
fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
|
||||
fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
|
||||
start++;
|
||||
i++;
|
||||
}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
|
||||
const struct fw_desc *sec,
|
||||
struct iwl_dram_data *dram)
|
||||
{
|
||||
dram->block = dma_alloc_coherent(trans->dev, sec->len,
|
||||
&dram->physical,
|
||||
GFP_KERNEL);
|
||||
if (!dram->block)
|
||||
return -ENOMEM;
|
||||
|
||||
dram->size = sec->len;
|
||||
memcpy(dram->block, sec->data, sec->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
|
||||
int i;
|
||||
|
||||
if (!dram->fw) {
|
||||
WARN_ON(dram->fw_cnt);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < dram->fw_cnt; i++)
|
||||
dma_free_coherent(trans->dev, dram->fw[i].size,
|
||||
dram->fw[i].block, dram->fw[i].physical);
|
||||
|
||||
kfree(dram->fw);
|
||||
dram->fw_cnt = 0;
|
||||
}
|
||||
|
||||
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
|
||||
int i;
|
||||
|
||||
if (!dram->paging) {
|
||||
WARN_ON(dram->paging_cnt);
|
||||
return;
|
||||
}
|
||||
|
||||
/* free paging*/
|
||||
for (i = 0; i < dram->paging_cnt; i++)
|
||||
dma_free_coherent(trans->dev, dram->paging[i].size,
|
||||
dram->paging[i].block,
|
||||
dram->paging[i].physical);
|
||||
|
||||
kfree(dram->paging);
|
||||
dram->paging_cnt = 0;
|
||||
}
|
||||
|
||||
static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
|
||||
const struct fw_img *fw,
|
||||
struct iwl_context_info *ctxt_info)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
|
||||
struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
|
||||
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
|
||||
|
||||
lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
|
||||
/* add 1 due to separator */
|
||||
umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
|
||||
/* add 2 due to separators */
|
||||
paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
|
||||
|
||||
dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
|
||||
if (!dram->fw)
|
||||
return -ENOMEM;
|
||||
dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
|
||||
if (!dram->paging)
|
||||
return -ENOMEM;
|
||||
|
||||
/* initialize lmac sections */
|
||||
for (i = 0; i < lmac_cnt; i++) {
|
||||
ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
|
||||
&dram->fw[dram->fw_cnt]);
|
||||
if (ret)
|
||||
return ret;
|
||||
ctxt_dram->lmac_img[i] =
|
||||
cpu_to_le64(dram->fw[dram->fw_cnt].physical);
|
||||
dram->fw_cnt++;
|
||||
}
|
||||
|
||||
/* initialize umac sections */
|
||||
for (i = 0; i < umac_cnt; i++) {
|
||||
/* access FW with +1 to make up for lmac separator */
|
||||
ret = iwl_pcie_ctxt_info_alloc_dma(trans,
|
||||
&fw->sec[dram->fw_cnt + 1],
|
||||
&dram->fw[dram->fw_cnt]);
|
||||
if (ret)
|
||||
return ret;
|
||||
ctxt_dram->umac_img[i] =
|
||||
cpu_to_le64(dram->fw[dram->fw_cnt].physical);
|
||||
dram->fw_cnt++;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize paging.
|
||||
* Paging memory isn't stored in dram->fw as the umac and lmac - it is
|
||||
* stored separately.
|
||||
* This is since the timing of its release is different -
|
||||
* while fw memory can be released on alive, the paging memory can be
|
||||
* freed only when the device goes down.
|
||||
* Given that, the logic here in accessing the fw image is a bit
|
||||
* different - fw_cnt isn't changing so loop counter is added to it.
|
||||
*/
|
||||
for (i = 0; i < paging_cnt; i++) {
|
||||
/* access FW with +2 to make up for lmac & umac separators */
|
||||
int fw_idx = dram->fw_cnt + i + 2;
|
||||
|
||||
ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
|
||||
&dram->paging[i]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ctxt_dram->virtual_img[i] =
|
||||
cpu_to_le64(dram->paging[i].physical);
|
||||
dram->paging_cnt++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
|
||||
const struct fw_img *fw)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_context_info *ctxt_info;
|
||||
struct iwl_context_info_rbd_cfg *rx_cfg;
|
||||
u32 control_flags = 0;
|
||||
int ret;
|
||||
|
||||
ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
|
||||
&trans_pcie->ctxt_info_dma_addr,
|
||||
GFP_KERNEL);
|
||||
if (!ctxt_info)
|
||||
return -ENOMEM;
|
||||
|
||||
ctxt_info->version.version = 0;
|
||||
ctxt_info->version.mac_id =
|
||||
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
|
||||
/* size is in DWs */
|
||||
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
|
||||
|
||||
BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
|
||||
control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
|
||||
IWL_CTXT_INFO_TFD_FORMAT_LONG |
|
||||
RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
|
||||
IWL_CTXT_INFO_RB_CB_SIZE_POS;
|
||||
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
|
||||
|
||||
/* initialize RX default queue */
|
||||
rx_cfg = &ctxt_info->rbd_cfg;
|
||||
rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
|
||||
rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
|
||||
rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
|
||||
|
||||
/* initialize TX command queue */
|
||||
ctxt_info->hcmd_cfg.cmd_queue_addr =
|
||||
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
|
||||
ctxt_info->hcmd_cfg.cmd_queue_size =
|
||||
TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);
|
||||
|
||||
/* allocate ucode sections in dram and set addresses */
|
||||
ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
|
||||
if (ret) {
|
||||
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
|
||||
ctxt_info, trans_pcie->ctxt_info_dma_addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
trans_pcie->ctxt_info = ctxt_info;
|
||||
|
||||
iwl_enable_interrupts(trans);
|
||||
|
||||
/* kick FW self load */
|
||||
iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
|
||||
iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
|
||||
|
||||
/* Context info will be released upon alive or failure to get one */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (!trans_pcie->ctxt_info)
|
||||
return;
|
||||
|
||||
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
|
||||
trans_pcie->ctxt_info,
|
||||
trans_pcie->ctxt_info_dma_addr);
|
||||
trans_pcie->ctxt_info_dma_addr = 0;
|
||||
trans_pcie->ctxt_info = NULL;
|
||||
|
||||
iwl_pcie_ctxt_info_free_fw_img(trans);
|
||||
}
|
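/*
 * Editor's sketch of the firmware image layout behind the +1/+2 index
 * arithmetic in iwl_pcie_ctxt_info_init_fw_sec() above (region sizes
 * are whatever the image carries; only the ordering is fixed):
 *
 *	sec[0] .. sec[lmac_cnt - 1]		lmac sections
 *	sec[lmac_cnt]				CPU1_CPU2_SEPARATOR_SECTION
 *	sec[lmac_cnt + 1] ..			umac sections ("+ 1")
 *	sec[lmac_cnt + umac_cnt + 1]		PAGING_SEPARATOR_SECTION
 *	sec[lmac_cnt + umac_cnt + 2] ..		paging sections ("+ 2")
 *
 * iwl_pcie_get_num_sections() counts entries until the next separator,
 * so each region is sized without copying the separators into DRAM.
 */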
@@ -501,6 +501,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},

/* 9000 Series */
@@ -533,7 +537,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},

/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
#endif /* CONFIG_IWLMVM */

{0}
@@ -667,18 +672,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans->cfg = cfg_7265d;
}

if (iwl_trans->cfg->rf_id) {
if (cfg == &iwl9460_2ac_cfg &&
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
cfg = &iwl9000lc_2ac_cfg;
iwl_trans->cfg = cfg;
}

if (cfg == &iwla000_2ac_cfg_hr &&
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
cfg = &iwla000_2ac_cfg_jf;
iwl_trans->cfg = cfg;
}
if (iwl_trans->cfg->rf_id &&
(cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
cfg = &iwla000_2ac_cfg_jf;
iwl_trans->cfg = cfg;
}
#endif
@@ -2,7 +2,7 @@
*
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -205,11 +205,11 @@ struct iwl_cmd_meta {
* into the buffer regardless of whether it should be mapped or not.
* This indicates how big the first TB must be to include the scratch buffer
* and the assigned PN.
* Since PN location is 16 bytes at offset 24, it's 40 now.
* Since PN location is 8 bytes at offset 12, it's 20 now.
* If we make it bigger then allocations will be bigger and copy slower, so
* that's probably not useful.
*/
#define IWL_FIRST_TB_SIZE 40
#define IWL_FIRST_TB_SIZE 20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
@@ -237,11 +237,11 @@ struct iwl_pcie_first_tb_buf {
* @stuck_timer: timer that fires if queue gets stuck
* @trans_pcie: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
* @active: stores if queue is active
* @ampdu: true if this queue is an ampdu queue for a specific RA/TID
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
* @dma_addr: physical addr for BD's
@@ -277,11 +277,11 @@ struct iwl_txq {
struct iwl_trans_pcie *trans_pcie;
bool need_update;
bool frozen;
u8 active;
bool ampdu;
int block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
struct iwl_dma_ptr bc_tbl;

int write_ptr;
int read_ptr;
@@ -314,12 +314,44 @@ enum iwl_shared_irq_flags {
|
||||
IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_dram_data
|
||||
* @physical: page phy pointer
|
||||
* @block: pointer to the allocated block/page
|
||||
* @size: size of the block/page
|
||||
*/
|
||||
struct iwl_dram_data {
|
||||
dma_addr_t physical;
|
||||
void *block;
|
||||
int size;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_self_init_dram - dram data used by self init process
|
||||
* @fw: lmac and umac dram data
|
||||
* @fw_cnt: total number of items in array
|
||||
* @paging: paging dram data
|
||||
* @paging_cnt: total number of items in array
|
||||
*/
|
||||
struct iwl_self_init_dram {
|
||||
struct iwl_dram_data *fw;
|
||||
int fw_cnt;
|
||||
struct iwl_dram_data *paging;
|
||||
int paging_cnt;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_trans_pcie - PCIe transport specific data
|
||||
* @rxq: all the RX queue data
|
||||
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
|
||||
* @global_table: table mapping received VID from hw to rxb
|
||||
* @rba: allocator for RX replenishing
|
||||
* @ctxt_info: context information for FW self init
|
||||
* @ctxt_info_dma_addr: dma addr of context information
|
||||
* @init_dram: DRAM data of firmware image (including paging).
|
||||
* Context information addresses will be taken from here.
|
||||
* This is driver's local copy for keeping track of size and
|
||||
* count for allocating and freeing the memory.
|
||||
* @trans: pointer to the generic transport area
|
||||
* @scd_base_addr: scheduler sram base address in SRAM
|
||||
* @scd_bc_tbls: pointer to the byte count table of the scheduler
|
||||
@@ -357,6 +389,9 @@ struct iwl_trans_pcie {
|
||||
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
|
||||
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
|
||||
struct iwl_rb_allocator rba;
|
||||
struct iwl_context_info *ctxt_info;
|
||||
dma_addr_t ctxt_info_dma_addr;
|
||||
struct iwl_self_init_dram init_dram;
|
||||
struct iwl_trans *trans;
|
||||
|
||||
struct net_device napi_dev;
|
||||
@@ -378,7 +413,8 @@ struct iwl_trans_pcie {
|
||||
struct iwl_dma_ptr scd_bc_tbls;
|
||||
struct iwl_dma_ptr kw;
|
||||
|
||||
struct iwl_txq *txq;
|
||||
struct iwl_txq *txq_memory;
|
||||
struct iwl_txq *txq[IWL_MAX_HW_QUEUES];
|
||||
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
|
||||
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
|
||||
|
||||
@@ -454,6 +490,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
|
||||
* RX
|
||||
******************************************************/
|
||||
int iwl_pcie_rx_init(struct iwl_trans *trans);
|
||||
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
|
||||
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
|
||||
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
|
||||
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
|
||||
@@ -474,6 +511,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 * TX / HCMD
 ******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -484,7 +522,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -616,6 +653,12 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
	}
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
				     struct iwl_txq *txq, int idx)
{
	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
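The new iwl_pcie_get_tfd helper above exists because a TFD's size is now a runtime property (legacy TFDs and gen2 TFH descriptors differ), so callers index the flat tfds buffer by byte offset instead of typed array access. A minimal standalone sketch of the same idiom; the struct names and sizes here are illustrative stand-ins, not the driver's:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins: the real driver stores tfd_size per device. */
struct demo_queue {
	void *tfds;      /* flat buffer holding all descriptors */
	size_t tfd_size; /* per-descriptor size, chosen at probe time */
};

static void *demo_get_tfd(struct demo_queue *q, int idx)
{
	/* Byte-offset indexing works for any descriptor size. */
	return (char *)q->tfds + q->tfd_size * idx;
}

int main(void)
{
	struct demo_queue q = { .tfd_size = 64 };

	q.tfds = calloc(256, q.tfd_size);
	printf("slot 3 at byte offset %td\n",
	       (char *)demo_get_tfd(&q, 3) - (char *)q.tfds);
	free(q.tfds);
	return 0;
}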
@@ -719,4 +762,40 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

/* common functions that are used by gen2 transport */
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
int iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */
@@ -2,7 +2,7 @@
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
@@ -880,7 +880,7 @@ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
@@ -958,20 +958,40 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, def_rxq);
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, def_rxq);
	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&def_rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
	spin_unlock(&def_rxq->lock);
	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

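The refactor above follows a common split: shared software setup moves into a static _iwl_pcie_rx_init(), and each public entry point layers only its own hardware programming on top; the gen2 path deliberately skips RFH configuration because firmware owns the RFH until the alive notification. A compilable sketch of that shape, with hypothetical names standing in for the driver's:

#include <stdio.h>

/* Hypothetical stand-ins for the shared/legacy/gen2 split. */
static int demo_rx_init_common(void)
{
	puts("allocate and reset RX queues");
	return 0;
}

static int demo_rx_init_legacy(void)
{
	int ret = demo_rx_init_common();

	if (ret)
		return ret;
	puts("program RX hardware, restock buffers");
	return 0;
}

static int demo_rx_init_gen2(void)
{
	/* Hardware setup is deferred until firmware reports alive. */
	return demo_rx_init_common();
}

int main(void)
{
	return demo_rx_init_legacy() || demo_rx_init_gen2();
}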
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1074,7 +1094,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;
@@ -1393,17 +1413,17 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		if (!trans_pcie->txq[i])
			continue;
		del_timer(&trans_pcie->txq[i]->stuck_timer);
	}

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
@@ -1597,6 +1617,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
	if (inta & CSR_INT_BIT_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/*
			 * We can restock, since firmware configured
			 * the RFH
			 */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}
}

@@ -1933,6 +1960,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	/* uCode wakes up after power-down sleep */
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c (new file, 374 lines)
@@ -0,0 +1,374 @@
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-trans.h"
#include "iwl-context-info.h"
#include "internal.h"

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		return ret;
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

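iwl_poll_bit() above busy-waits, with a 25000-microsecond budget, until CSR_GP_CNTRL reports that the MAC clock is ready. A user-space sketch of that poll-with-deadline idiom; the register read is simulated and the helper is not the driver's implementation:

#include <stdio.h>

static unsigned int fake_reg;

static unsigned int read_reg(void)
{
	/* Simulate the clock becoming ready after a few polls. */
	return ++fake_reg >= 5 ? 0x1 : 0x0;
}

/* Poll until (reg & mask) == expected, or the time budget runs out. */
static int poll_bit(unsigned int mask, unsigned int expected, int budget_us)
{
	int waited;

	for (waited = 0; waited < budget_us; waited += 10) {
		if ((read_reg() & mask) == expected)
			return waited;   /* success: time spent polling */
		/* the real driver delays ~10 us between reads */
	}
	return -1;                       /* timeout (-ETIMEDOUT in-kernel) */
}

int main(void)
{
	printf("ready after ~%d us\n", poll_bit(0x1, 0x1, 25000));
	return 0;
}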
static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_gen2_apm_init(trans);

		/* inform ME that we are leaving */
		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE |
			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(1);
		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_gen2_tx_stop(trans);
		iwl_pcie_rx_stop(trans);
	}

	iwl_pcie_ctxt_info_free_paging(trans);
	iwl_pcie_ctxt_info_free(trans);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_gen2_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(1000, 2000);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

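The comment above is the key to the rfkill re-check: the op_mode is notified only when the state actually changed, which is what bounds the stop_device -> rf_kill -> stop_device recursion. A small sketch of that edge-triggered notification, with invented names:

#include <stdbool.h>
#include <stdio.h>

static bool reported_rfkill;

static bool read_hw_rfkill(void)
{
	return true; /* pretend the switch is set */
}

static void notify_op_mode(bool state)
{
	printf("op_mode sees rfkill=%d\n", state);
	/* The op_mode may call back into stop_device() here; because we
	 * notify only on a *change*, the nested call sees no change and
	 * the recursion terminates. */
}

static void stop_device(void)
{
	bool was = reported_rfkill;
	bool now = read_hw_rfkill();

	reported_rfkill = now;
	if (now != was)
		notify_op_mode(now);
}

int main(void)
{
	stop_device(); /* notifies once */
	stop_device(); /* state unchanged: no notification */
	return 0;
}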
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_gen2_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}

static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_gen2_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (iwl_pcie_gen2_rx_init(trans))
		return -ENOMEM;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_gen2_tx_init(trans))
		return -ENOMEM;

	/* enable shadow regs in HW */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");

	return 0;
}

void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_pcie_reset_ict(trans);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* now that we got alive we can free the fw image & the context info.
	 * paging memory cannot be freed, since FW will still use it
	 */
	iwl_pcie_ctxt_info_free(trans);
}

int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_gen2_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	ret = iwl_pcie_ctxt_info_init(trans, fw);
	if (ret)
		goto out;

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -201,7 +201,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
@@ -448,7 +448,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
			 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

@@ -567,7 +567,7 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
@@ -636,29 +636,6 @@ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
			    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
					     u32 dst_addr, dma_addr_t phy_addr,
					     u32 byte_cnt)
{
	/* Stop DMA channel */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);

	/* Configure SRAM address */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
		    dst_addr);

	/* Configure DRAM address - 64 bit */
	iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);

	/* Configure byte count to transfer */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);

	/* Enable the DRAM2SRAM to start */
	iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
						   TFH_SRV_DMA_TO_DRIVER |
						   TFH_SRV_DMA_START);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
@@ -672,12 +649,8 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
	if (!iwl_trans_grab_nic_access(trans, &flags))
		return -EIO;

	if (trans->cfg->use_tfh)
		iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
						 byte_cnt);
	else
		iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
						byte_cnt);
	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans, &flags);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
@@ -747,47 +720,6 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
	return ret;
}

/*
 * Driver takes ownership of the secure machine before FW load
 * and prevents a race with the BT load.
 * W/A for ROM bug. (should be removed in the next Si step)
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_DEBUG_INFO(trans,
			       "can't access the RSA semaphore, it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
@@ -828,15 +760,10 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
		return ret;

	/* Notify ucode of loaded section number and status */
	if (trans->cfg->use_tfh) {
		val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
	} else {
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
	}
	val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
	val = val | (sec_num << shift_param);
	iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

	sec_num = (sec_num << 1) | 0x1;
}

@@ -1042,10 +969,15 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;
	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume, reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
@@ -1062,7 +994,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					   &first_ucode_section);
}

static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
{
	bool hw_rfkill = iwl_is_rfkill_set(trans);

@@ -1147,7 +1079,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
	iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

@@ -1299,7 +1231,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
	iwl_pcie_prepare_card_hw(trans);
}

static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@@ -1423,8 +1355,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans, true);
		else
			_iwl_trans_pcie_stop_device(trans, true);
	}
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
@@ -1527,6 +1463,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
@@ -1828,7 +1767,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)

	iwl_pcie_synchronize_irqs(trans);

	iwl_pcie_tx_free(trans);
	if (trans->cfg->gen2)
		iwl_pcie_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->msix_enabled) {
@@ -1998,7 +1940,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		struct iwl_txq *txq = trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);
@@ -2050,7 +1992,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];
		struct iwl_txq *txq = trans_pcie->txq[i];

		if (i == trans_pcie->cmd_queue)
			continue;
@@ -2075,48 +2017,32 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)

void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 scd_sram_addr;
	u8 buf[16];
	int cnt;
	u32 txq_id = txq->id;
	u32 status;
	bool active;
	u8 fifo;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->read_ptr, txq->write_ptr);

	if (trans->cfg->use_tfh)
	if (trans->cfg->use_tfh) {
		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
			txq->read_ptr, txq->write_ptr);
		/* TODO: access new SCD registers and dump them */
		return;

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
			(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

	IWL_ERR(trans,
		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
		txq_id, active ? "" : "in", fifo,
		jiffies_to_msecs(txq->wd_timeout),
		txq->read_ptr, txq->write_ptr,
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
		(TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
		(TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
@@ -2139,7 +2065,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		txq = trans_pcie->txq[cnt];
		wr_ptr = ACCESS_ONCE(txq->write_ptr);

		while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
@@ -2330,7 +2256,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
	if (!trans_pcie->txq_memory)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2338,7 +2264,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		txq = trans_pcie->txq[cnt];
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, txq->read_ptr, txq->write_ptr,
@@ -2755,7 +2681,7 @@ static struct iwl_trans_dump_data
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
@@ -2890,21 +2816,43 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
}
#endif /* CONFIG_PM_SLEEP */

#define IWL_TRANS_COMMON_OPS \
	.op_mode_leave = iwl_trans_pcie_op_mode_leave, \
	.write8 = iwl_trans_pcie_write8, \
	.write32 = iwl_trans_pcie_write32, \
	.read32 = iwl_trans_pcie_read32, \
	.read_prph = iwl_trans_pcie_read_prph, \
	.write_prph = iwl_trans_pcie_write_prph, \
	.read_mem = iwl_trans_pcie_read_mem, \
	.write_mem = iwl_trans_pcie_write_mem, \
	.configure = iwl_trans_pcie_configure, \
	.set_pmi = iwl_trans_pcie_set_pmi, \
	.grab_nic_access = iwl_trans_pcie_grab_nic_access, \
	.release_nic_access = iwl_trans_pcie_release_nic_access, \
	.set_bits_mask = iwl_trans_pcie_set_bits_mask, \
	.ref = iwl_trans_pcie_ref, \
	.unref = iwl_trans_pcie_unref, \
	.dump_data = iwl_trans_pcie_dump_data, \
	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, \
	.d3_suspend = iwl_trans_pcie_d3_suspend, \
	.d3_resume = iwl_trans_pcie_d3_resume

#ifdef CONFIG_PM_SLEEP
#define IWL_TRANS_PM_OPS \
	.suspend = iwl_trans_pcie_suspend, \
	.resume = iwl_trans_pcie_resume,
#else
#define IWL_TRANS_PM_OPS
#endif /* CONFIG_PM_SLEEP */

static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif /* CONFIG_PM_SLEEP */

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
@@ -2913,31 +2861,27 @@ static const struct iwl_trans_ops trans_ops_pcie = {
	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
};

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	IWL_TRANS_PM_OPS
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,
	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,

	.dump_data = iwl_trans_pcie_dump_data,
	.tx = iwl_trans_pcie_gen2_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
	.txq_free = iwl_trans_pcie_dyn_txq_free,
};

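IWL_TRANS_COMMON_OPS folds the shared designated initializers into one macro, so trans_ops_pcie and trans_ops_pcie_gen2 differ only in the hooks that genuinely diverge (start_fw, stop_device, tx, and so on). The same trick in miniature, with hypothetical ops:

#include <stdio.h>

struct demo_ops {
	void (*read)(void);
	void (*start)(void);
};

static void common_read(void) { puts("shared register read"); }
static void start_v1(void)    { puts("legacy firmware load"); }
static void start_v2(void)    { puts("gen2 context-info load"); }

/* Shared initializers live in one macro; the comma comes from the
 * use site, mirroring the driver's layout. */
#define DEMO_COMMON_OPS \
	.read = common_read

static const struct demo_ops ops_v1 = {
	DEMO_COMMON_OPS,
	.start = start_v1,
};

static const struct demo_ops ops_v2 = {
	DEMO_COMMON_OPS,
	.start = start_v2,
};

int main(void)
{
	ops_v1.start();
	ops_v2.start();
	ops_v2.read();
	return 0;
}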
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -2952,8 +2896,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (cfg->gen2)
		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
					&pdev->dev, cfg, &trans_ops_pcie_gen2);
	else
		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
					&pdev->dev, cfg, &trans_ops_pcie);
	if (!trans)
		return ERR_PTR(-ENOMEM);

drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c (new file, 1018 lines; diff suppressed because it is too large)
@@ -2,7 +2,7 @@
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
@@ -71,7 +71,7 @@
 *
 ***************************************************/

static int iwl_queue_space(const struct iwl_txq *q)
int iwl_queue_space(const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;
@@ -102,10 +102,9 @@ static int iwl_queue_space(const struct iwl_txq *q)
/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
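The power-of-two requirement the comment mentions exists because the command index is derived by masking rather than by a modulo: with n_window equal to 2^k, `ptr & (n_window - 1)` wraps correctly. A standalone illustration (get_cmd_index itself lives in the driver; this version is a sketch):

#include <stdio.h>

/* Wraps a free-running pointer into a window that must be 2^k wide. */
static int get_cmd_index_demo(int n_window, int ptr)
{
	return ptr & (n_window - 1);
}

int main(void)
{
	int n_window = 32; /* power of two, as iwl_queue_init requires */
	int ptr;

	for (ptr = 30; ptr < 35; ptr++)
		printf("ptr %d -> slot %d\n",
		       ptr, get_cmd_index_demo(n_window, ptr));
	/* With a non-power-of-two window the mask would skip and repeat
	 * slots, which is why the init path enforces 2^k. */
	return 0;
}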
@@ -126,8 +125,8 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;
@@ -140,8 +139,7 @@ static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;
@@ -164,9 +162,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
		jiffies_to_msecs(txq->wd_timeout));

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
@@ -188,6 +183,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->write_ptr].cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

@@ -210,26 +206,7 @@
	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	if (trans->cfg->use_tfh) {
		u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
				     num_tbs * sizeof(struct iwl_tfh_tb);
		/*
		 * filled_tfd_size contains the number of filled bytes in the
		 * TFD.
		 * Dividing it by 64 will give the number of chunks to fetch
		 * to SRAM- 0 for one chunk, 1 for 2 and so on.
		 * If, for example, TFD contains only 3 TBs then 32 bytes
		 * of the TFD are used, and only one chunk of 64 bytes should
		 * be fetched
		 */
		u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	} else {
		u8 sta_id = tx_cmd->sta_id;

		bc_ent = cpu_to_le16(len | (sta_id << 12));
	}
	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

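The removed TFH branch computed how many 64-byte chunks of the TFD the hardware must fetch, encoded as count-minus-one in the top bits of the byte-count entry. Working the comment's own example; the struct sizes below are stand-ins, only the arithmetic matters:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Stand-in sizes: assume an 8-byte TFD header and 8-byte TBs,
	 * so 3 TBs fill 8 + 3*8 = 32 bytes, matching the comment. */
	unsigned int filled_tfd_size = 8 + 3 * 8;
	unsigned int num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
	unsigned int len = 100; /* example frame byte count */

	/* 32 bytes -> one 64-byte chunk -> encoded as 0 */
	printf("chunks field = %u\n", num_fetch_chunks);
	printf("bc_ent = 0x%04x\n", len | (num_fetch_chunks << 12));
	return 0;
}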
@@ -319,23 +296,17 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];
		struct iwl_txq *txq = trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
				     struct iwl_txq *txq, int idx)
{
	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
						  void *_tfd, u8 idx)
{
@@ -368,28 +339,17 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

		put_unaligned_le64(addr, &tb->addr);
		tb->tb_len = cpu_to_le16(len);
	u16 hi_n_len = len << 4;

		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;
		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

		u16 hi_n_len = len << 4;
	tb->hi_n_len = cpu_to_le16(hi_n_len);

	put_unaligned_le32(addr, &tb->lo);
		if (sizeof(dma_addr_t) > sizeof(u32))
			hi_n_len |= ((addr >> 16) >> 16) & 0xF;

		tb->hi_n_len = cpu_to_le16(hi_n_len);

		tfd_fh->num_tbs = idx + 1;
	}
	tfd_fh->num_tbs = idx + 1;
}

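In the legacy branch that survives above, a DMA address wider than 32 bits is split across the descriptor: the low 32 bits go to tb->lo, and the top 4 bits share a 16-bit field with the 12-bit length (hence `len << 4`). A round-trip sketch of that packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0xABCD12345678ULL & 0xFFFFFFFFFULL; /* 36-bit DMA addr */
	uint16_t len = 0x123;                               /* 12-bit length */

	/* Pack: low 32 bits, then len in bits 4..15 and addr[35:32] in 3..0 */
	uint32_t lo = (uint32_t)addr;
	uint16_t hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));

	/* Unpack and verify the round trip */
	uint64_t back = ((uint64_t)(hi_n_len & 0xF) << 32) | lo;

	printf("addr ok: %d, len ok: %d\n",
	       back == addr, (hi_n_len >> 4) == len);
	return 0;
}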
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
@@ -460,7 +420,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
@@ -522,9 +482,8 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
	return num_tbs;
}

static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
@@ -547,7 +506,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
@@ -573,13 +532,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	txq->id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
@@ -589,10 +546,9 @@ error:

}

static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	txq->need_update = false;
@@ -602,13 +558,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num, txq_id);
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (txq_id == trans_pcie->cmd_queue) {
	if (cmd_queue) {
		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
@@ -616,18 +572,6 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,

	__skb_queue_head_init(&txq->overflow_q);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	if (trans->cfg->use_tfh)
		iwl_write_direct64(trans,
				   FH_MEM_CBBC_QUEUE(trans, txq_id),
				   txq->dma_addr);
	else
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   txq->dma_addr >> 8);

	return 0;
}

@@ -672,7 +616,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
@@ -704,7 +648,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	txq->active = false;

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
@@ -729,7 +672,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

@@ -780,9 +723,6 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	if (trans->cfg->use_tfh)
		return;

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

@@ -832,9 +772,16 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * We should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses.
	 */
	if (WARN_ON_ONCE(trans->cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];
		struct iwl_txq *txq = trans_pcie->txq[txq_id];
		if (trans->cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -914,7 +861,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
@@ -935,15 +882,20 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Tx queues */
	if (trans_pcie->txq) {
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
		     txq_id < trans->cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans_pcie->txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;
	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

@@ -965,7 +917,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}
@@ -984,9 +936,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
	trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
					 sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
@@ -995,14 +947,17 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

		slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
					 slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans_pcie->txq[txq_id]->id = txq_id;
	}

	return 0;
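The allocation above now separates storage from lookup: txq_memory is one contiguous kcalloc'd backing array (kept so the legacy path can free everything at once), while txq[] holds per-queue pointers that the gen2 path can later aim at dynamically allocated queues instead. A user-space sketch of the same two-level scheme, with invented types:

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 8

struct demo_txq { int id; };

static struct demo_txq *queues[MAX_QUEUES]; /* lookup table */
static struct demo_txq *queue_memory;       /* contiguous backing store */

static int alloc_static_queues(int n)
{
	int i;

	queue_memory = calloc(n, sizeof(*queue_memory));
	if (!queue_memory)
		return -1;
	for (i = 0; i < n; i++) {
		queues[i] = &queue_memory[i]; /* point into the slab */
		queues[i]->id = i;
	}
	return 0;
}

int main(void)
{
	if (alloc_static_queues(4))
		return 1;
	/* A gen2-style path could instead malloc() a single queue and
	 * park it in queues[i] without touching queue_memory. */
	printf("queue 2 has id %d\n", queues[2]->id);
	free(queue_memory);
	return 0;
}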
@@ -1012,6 +967,7 @@ error:

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1019,7 +975,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
@@ -1040,22 +996,24 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

		slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
					slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	if (trans->cfg->use_tfh) {
		iwl_write_direct32(trans, TFH_TRANSFER_MODE,
				   TFH_TRANSFER_MAX_PENDING_REQ |
				   TFH_CHUNK_SIZE_128 |
				   TFH_CHUNK_SPLIT_MODE);
		return 0;
		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans_pcie->txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -1100,7 +1058,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
 	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
 	int last_to_free;

@@ -1110,7 +1068,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,

 	spin_lock_bh(&txq->lock);

-	if (!txq->active) {
+	if (!test_bit(txq_id, trans_pcie->queue_used)) {
 		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
 				    txq_id, ssn);
 		goto out;
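Note: two details in the reclaim path are worth spelling out. `ssn & (TFD_QUEUE_SIZE_MAX - 1)` is the standard power-of-two ring wrap; it is only equivalent to a modulo because the queue depth (256 in this generation) is a power of two. And the per-queue `active` flag is retired in favor of the queue_used bitmap the driver already maintains, leaving a single source of truth for "is this queue in use". A standalone sketch of both idioms (the bitmap here is a plain unsigned long, a stand-in for the kernel's test_bit() machinery):

    #include <stdbool.h>
    #include <stdio.h>

    #define TFD_QUEUE_SIZE_MAX 256          /* assumed, must be a power of two */

    static unsigned long queue_used;        /* one bit per queue (sketch) */

    static bool queue_is_used(int txq_id)
    {
            return queue_used & (1UL << txq_id); /* test_bit() analogue */
    }

    int main(void)
    {
            unsigned int ssn = 300;         /* sequence number past one wrap */

            queue_used |= 1UL << 4;         /* "enable" queue 4 */
            printf("tfd_num=%u, queue 4 used=%d\n",
                   ssn & (TFD_QUEUE_SIZE_MAX - 1), queue_is_used(4));
            return 0;
    }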
@@ -1257,7 +1215,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
 	unsigned long flags;
 	int nfreed = 0;

@@ -1324,15 +1282,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			       unsigned int wdg_timeout)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
 	int fifo = -1;

 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

-	if (cfg && trans->cfg->use_tfh)
-		WARN_ONCE(1, "Expected no calls to SCD configuration");
-
 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

 	if (cfg) {
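Note: the double-enable guard stays: test_and_set_bit() atomically sets the bit and returns its previous value, so a second enable of the same queue trips the WARN_ONCE. The removed use_tfh WARN no longer belongs in this legacy SCD path, since TFH-capable devices get their own transport code in this series. A sketch of the test-and-set idiom using C11 atomics as a stand-in for the kernel helper:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long queue_used;

    /* Returns the bit's previous value, like test_and_set_bit(). */
    static int test_and_set_queue(int txq_id)
    {
            unsigned long mask = 1UL << txq_id;

            return (atomic_fetch_or(&queue_used, mask) & mask) != 0;
    }

    int main(void)
    {
            printf("%d\n", test_and_set_queue(5)); /* 0: newly claimed */
            printf("%d\n", test_and_set_queue(5)); /* 1: already in use */
            return 0;
    }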
@@ -1414,27 +1369,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 				    "Activate queue %d WrPtr: %d\n",
 				    txq_id, ssn & 0xff);
 	}
-
-	txq->active = true;
 }

 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 					bool shared_mode)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];

 	txq->ampdu = !shared_mode;
 }

-dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	return trans_pcie->scd_bc_tbls.dma +
-	       txq * sizeof(struct iwlagn_scd_bc_tbl);
-}
-
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 				bool configure_scd)
 {
@@ -1443,8 +1388,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
 	static const u32 zero_val[4] = {};

-	trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
-	trans_pcie->txq[txq_id].frozen = false;
+	trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
+	trans_pcie->txq[txq_id]->frozen = false;

 	/*
 	 * Upon HW Rfkill - we stop the device, and then stop the queues
@@ -1458,9 +1403,6 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 		return;
 	}

-	if (configure_scd && trans->cfg->use_tfh)
-		WARN_ONCE(1, "Expected no calls to SCD configuration");
-
 	if (configure_scd) {
 		iwl_scd_txq_set_inactive(trans, txq_id);

@@ -1469,7 +1411,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 	}

 	iwl_pcie_txq_unmap(trans, txq_id);
-	trans_pcie->txq[txq_id].ampdu = false;
+	trans_pcie->txq[txq_id]->ampdu = false;

 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
@@ -1489,7 +1431,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 				 struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	unsigned long flags;
@@ -1774,16 +1716,15 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];

 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
 	 * in the queue management code. */
 	if (WARN(txq_id != trans_pcie->cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-		 txq_id, trans_pcie->cmd_queue, sequence,
-		 trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
-		 trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
+		 txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+		 txq->write_ptr)) {
 		iwl_print_hex_error(trans, pkt, 32);
 		return;
 	}
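Note: the host-command paths also pick up a readability fix that the pointer conversion makes natural: fetch trans_pcie->txq[trans_pcie->cmd_queue] once into a local txq and reuse it, instead of re-deriving the queue inside the WARN() arguments and again in the cancel path below. A sketch of the before/after access pattern (placeholder struct, fields named after the diff):

    struct iwl_txq { int read_ptr, write_ptr; };

    static int read_ptr_of(struct iwl_txq **txq_array, int cmd_queue)
    {
            /* fetch once ... */
            struct iwl_txq *txq = txq_array[cmd_queue];

            /* ... then every later access is short and obviously against
             * the same queue: txq->read_ptr, txq->entries[...], etc. */
            return txq->read_ptr;
    }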
@@ -1867,6 +1808,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 				   struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 	int cmd_idx;
 	int ret;

@@ -1907,8 +1849,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 				 &trans->status),
 			   HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
-		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-
 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
 			iwl_get_cmd_string(trans, cmd->id),
 			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
@@ -1959,8 +1899,7 @@ cancel:
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source).
 		 */
-		trans_pcie->txq[trans_pcie->cmd_queue].
-			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
 	}

 	if (cmd->resp_pkt) {
@@ -2314,7 +2253,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	u16 wifi_seq;
 	bool amsdu;

-	txq = &trans_pcie->txq[txq_id];
+	txq = trans_pcie->txq[txq_id];

 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
 		      "TX on unused queue %d\n", txq_id))