Merge tag 'iwlwifi-next-for-kalle-2016-09-15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
* work for new hardware support continues
* dynamic queue allocation stabilization
* improvements in the MSIx code
* multiqueue support work continues
* new firmware version support
* general cleanups and improvements
@@ -577,6 +577,85 @@ struct iwl_mvm_ba_notif {
 	u8 reserved1;
 } __packed;
 
+/**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ */
+struct iwl_mvm_compressed_ba_tfd {
+	u8 q_num;
+	u8 reserved;
+	__le16 tfd_index;
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+	u8 q_num;
+	u8 tid;
+	__le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ *	expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+	IWL_MVM_BA_RESP_TX_AGG,
+	IWL_MVM_BA_RESP_TX_BAR,
+	IWL_MVM_BA_RESP_TX_AGG_FAIL,
+	IWL_MVM_BA_RESP_TX_UNDERRUN,
+	IWL_MVM_BA_RESP_TX_BT_KILL,
+	IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *	not a copy from the LQ command. Thus, if not the first rate was used
+ *	for Tx-ing then this value will be set to 0 by FW.
+ * @initial_rate: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ */
+struct iwl_mvm_compressed_ba_notif {
+	__le32 flags;
+	u8 sta_id;
+	u8 reduced_txp;
+	u8 initial_rate;
+	u8 retry_cnt;
+	__le32 query_byte_cnt;
+	__le16 query_frame_cnt;
+	__le16 txed;
+	__le16 done;
+	__le32 wireless_time;
+	__le32 tx_rate;
+	__le16 tfd_cnt;
+	__le16 ra_tid_cnt;
+	struct iwl_mvm_compressed_ba_tfd tfd[1];
+	struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
+
 /**
  * struct iwl_mac_beacon_cmd_v6 - beacon template command
  * @tx: the tx commands associated with the beacon frame
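The tfd_cnt/ra_tid_cnt fields size the two trailing arrays, so a consumer has to walk the tfd[] entries to their end before the ra_tid[] entries begin (tfd is declared [1], so the first entry is inline). A standalone sketch of that walk, with the wire layout mirrored in plain stdint types; the names, and the assumption of a little-endian host for the __le16 fields, are this example's, not the driver's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Host-side mirrors of the wire structs above (illustrative only). */
struct ba_tfd   { uint8_t q_num; uint8_t reserved; uint16_t tfd_index; };
struct ba_ratid { uint8_t q_num; uint8_t tid;      uint16_t ssn; };

/* Walk the variable tail of a compressed BA notification: tfd_cnt
 * TFD-queue entries first, then ra_tid_cnt RA/TID-queue entries. */
static void walk_ba_tail(const uint8_t *tail, uint16_t tfd_cnt,
			 uint16_t ra_tid_cnt)
{
	const uint8_t *p = tail;
	struct ba_tfd tfd;
	struct ba_ratid ratid;

	for (uint16_t i = 0; i < tfd_cnt; i++, p += sizeof(tfd)) {
		memcpy(&tfd, p, sizeof(tfd)); /* avoid unaligned reads */
		printf("TFD q %u: first un-acked index %u\n",
		       tfd.q_num, tfd.tfd_index);
	}
	for (uint16_t i = 0; i < ra_tid_cnt; i++, p += sizeof(ratid)) {
		memcpy(&ratid, p, sizeof(ratid));
		printf("RA/TID q %u: tid %u, ssn %u\n",
		       ratid.q_num, ratid.tid, ratid.ssn);
	}
}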
@@ -1977,8 +1977,9 @@ struct iwl_tdls_config_res {
 	struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
 } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
 
-#define TX_FIFO_MAX_NUM			8
-#define RX_FIFO_MAX_NUM			2
+#define TX_FIFO_MAX_NUM_9000		8
+#define TX_FIFO_MAX_NUM			15
+#define RX_FIFO_MAX_NUM			2
 #define TX_FIFO_INTERNAL_MAX_NUM	6
 
 /**
@@ -2004,6 +2005,21 @@ struct iwl_tdls_config_res {
  * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
  * set, the last 3 members don't exist.
  */
+struct iwl_shared_mem_cfg_v1 {
+	__le32 shared_mem_addr;
+	__le32 shared_mem_size;
+	__le32 sample_buff_addr;
+	__le32 sample_buff_size;
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+	__le32 rxfifo_size[RX_FIFO_MAX_NUM];
+	__le32 page_buff_addr;
+	__le32 page_buff_size;
+	__le32 rxfifo_addr;
+	__le32 internal_txfifo_addr;
+	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
 struct iwl_shared_mem_cfg {
 	__le32 shared_mem_addr;
 	__le32 shared_mem_size;
@@ -2017,7 +2033,7 @@ struct iwl_shared_mem_cfg {
 	__le32 rxfifo_addr;
 	__le32 internal_txfifo_addr;
 	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
 
 /**
  * VHT MU-MIMO group configuration
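The NOTE above is the compatibility wrinkle that matters here: on firmware without IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG the response simply stops after page_buff_size, so a parser must not read the last three members. A self-contained sketch of that gated copy, with the v1 layout mirrored locally in fixed-width types; this is an illustration, not the driver's parser:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Local mirror of the v1 wire layout above (illustrative). */
struct shared_mem_cfg_v1 {
	uint32_t shared_mem_addr, shared_mem_size;
	uint32_t sample_buff_addr, sample_buff_size;
	uint32_t txfifo_addr, txfifo_size[8];
	uint32_t rxfifo_size[2];
	uint32_t page_buff_addr, page_buff_size;
	/* Present only when the firmware advertises
	 * IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: */
	uint32_t rxfifo_addr, internal_txfifo_addr;
	uint32_t internal_txfifo_size[6];
} __attribute__((packed));

/* Copy only as much of the response as this firmware actually sent. */
static bool parse_shared_mem_v1(const void *resp, size_t resp_len,
				bool has_extend_capa,
				struct shared_mem_cfg_v1 *out)
{
	size_t want = has_extend_capa ? sizeof(*out) :
		      offsetof(struct shared_mem_cfg_v1, rxfifo_addr);

	if (resp_len < want)
		return false; /* truncated response */
	memset(out, 0, sizeof(*out));
	memcpy(out, resp, want);
	return true;
}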
@@ -440,14 +440,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
 	{ .start = 0x00a04560, .end = 0x00a0457c },
 	{ .start = 0x00a04590, .end = 0x00a04598 },
 	{ .start = 0x00a045c0, .end = 0x00a045f4 },
 	{ .start = 0x00a44000, .end = 0x00a7bf80 },
 };
 
 static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
 	{ .start = 0x00a05c00, .end = 0x00a05c18 },
 	{ .start = 0x00a05400, .end = 0x00a056e8 },
 	{ .start = 0x00a08000, .end = 0x00a098bc },
 	{ .start = 0x00adfc00, .end = 0x00adfd1c },
 	{ .start = 0x00a02400, .end = 0x00a02758 },
 };
 
@@ -559,7 +557,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 			sizeof(struct iwl_fw_error_dump_fifo);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+	for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
 		if (!mem_cfg->txfifo_size[i])
 			continue;
 
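The loop change matters because txfifo_size[] is now dimensioned for the largest family (TX_FIFO_MAX_NUM is 15) while older devices fill fewer slots, so iterating with ARRAY_SIZE() would visit entries the firmware never reported. A small sketch of the resulting sizing pattern; the type and field names are local to this example:

#include <stddef.h>
#include <stdint.h>

struct mem_cfg_example {
	int num_txfifo_entries;		/* how many slots this device filled */
	uint32_t txfifo_size[15];	/* dimensioned for the largest family */
};

/* Account one dump chunk per non-empty, *valid* TX FIFO. */
static size_t fifo_dump_bytes(const struct mem_cfg_example *cfg,
			      size_t per_fifo_header)
{
	size_t total = 0;

	for (int i = 0; i < cfg->num_txfifo_entries; i++) {
		if (!cfg->txfifo_size[i])
			continue;	/* unused FIFO on this device */
		total += cfg->txfifo_size[i] + per_fifo_header;
	}
	return total;
}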
@@ -90,15 +90,6 @@ struct iwl_mvm_alive_data {
 	u32 scd_base_addr;
 };
 
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
-{
-	if (ucode_type >= IWL_UCODE_TYPE_MAX)
-		return NULL;
-
-	return &mvm->fw->img[ucode_type];
-}
-
 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
 {
 	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -592,9 +583,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
 	    !(fw_has_capa(&mvm->fw->ucode_capa,
 			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
-		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
+		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
 	else
-		fw = iwl_get_ucode_image(mvm, ucode_type);
+		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
 	if (WARN_ON(!fw))
 		return -EINVAL;
 	mvm->cur_ucode = ucode_type;
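The call sites now pass mvm->fw rather than mvm, which implies the helper removed above was relocated to code shared beyond the mvm layer and re-typed to take the firmware struct directly. A sketch of the presumed relocated form; its exact home and const-qualification are not visible in this hunk:

/* Presumed shared-header form of the helper: it only needs the loaded
 * firmware images, not the whole mvm context. */
static inline const struct fw_img *
iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &fw->img[ucode_type];
}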
@@ -838,6 +829,59 @@ out:
 	return ret;
 }
 
+static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
+					  struct iwl_rx_packet *pkt)
+{
+	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
+	int i;
+
+	mvm->shared_mem_cfg.num_txfifo_entries =
+		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+		mvm->shared_mem_cfg.txfifo_size[i] =
+			le32_to_cpu(mem_cfg->txfifo_size[i]);
+	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+		mvm->shared_mem_cfg.rxfifo_size[i] =
+			le32_to_cpu(mem_cfg->rxfifo_size[i]);
+
+	BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+		     sizeof(mem_cfg->internal_txfifo_size));
+
+	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+	     i++)
+		mvm->shared_mem_cfg.internal_txfifo_size[i] =
+			le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+}
+
+static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
+				     struct iwl_rx_packet *pkt)
+{
+	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
+	int i;
+
+	mvm->shared_mem_cfg.num_txfifo_entries =
+		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+		mvm->shared_mem_cfg.txfifo_size[i] =
+			le32_to_cpu(mem_cfg->txfifo_size[i]);
+	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+		mvm->shared_mem_cfg.rxfifo_size[i] =
+			le32_to_cpu(mem_cfg->rxfifo_size[i]);
+
+	/* new API has more data, from rxfifo_addr field and on */
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+			     sizeof(mem_cfg->internal_txfifo_size));
+
+		for (i = 0;
+		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+		     i++)
+			mvm->shared_mem_cfg.internal_txfifo_size[i] =
+				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+	}
+}
+
 static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 {
 	struct iwl_host_cmd cmd = {
@@ -845,9 +889,7 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 		.data = { NULL, },
 		.len = { 0, },
 	};
-	struct iwl_shared_mem_cfg *mem_cfg;
 	struct iwl_rx_packet *pkt;
-	u32 i;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -861,45 +903,10 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 		return;
 
 	pkt = cmd.resp_pkt;
-	mem_cfg = (void *)pkt->data;
-
-	mvm->shared_mem_cfg.shared_mem_addr =
-		le32_to_cpu(mem_cfg->shared_mem_addr);
-	mvm->shared_mem_cfg.shared_mem_size =
-		le32_to_cpu(mem_cfg->shared_mem_size);
-	mvm->shared_mem_cfg.sample_buff_addr =
-		le32_to_cpu(mem_cfg->sample_buff_addr);
-	mvm->shared_mem_cfg.sample_buff_size =
-		le32_to_cpu(mem_cfg->sample_buff_size);
-	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
-	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
-		mvm->shared_mem_cfg.txfifo_size[i] =
-			le32_to_cpu(mem_cfg->txfifo_size[i]);
-	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
-		mvm->shared_mem_cfg.rxfifo_size[i] =
-			le32_to_cpu(mem_cfg->rxfifo_size[i]);
-	mvm->shared_mem_cfg.page_buff_addr =
-		le32_to_cpu(mem_cfg->page_buff_addr);
-	mvm->shared_mem_cfg.page_buff_size =
-		le32_to_cpu(mem_cfg->page_buff_size);
-
-	/* new API has more data */
-	if (fw_has_capa(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
-		mvm->shared_mem_cfg.rxfifo_addr =
-			le32_to_cpu(mem_cfg->rxfifo_addr);
-		mvm->shared_mem_cfg.internal_txfifo_addr =
-			le32_to_cpu(mem_cfg->internal_txfifo_addr);
-
-		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
-			     sizeof(mem_cfg->internal_txfifo_size));
-
-		for (i = 0;
-		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
-		     i++)
-			mvm->shared_mem_cfg.internal_txfifo_size[i] =
-				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
-	}
+	if (iwl_mvm_has_new_tx_api(mvm))
+		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
+	else
+		iwl_mvm_parse_shared_mem(mvm, pkt);
 
 	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
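For context, the surrounding function follows the driver's usual synchronous host-command pattern: build an iwl_host_cmd that keeps its response, send it under the mvm mutex, parse cmd.resp_pkt, and release the response buffer. A condensed sketch of that shape; error handling is trimmed, and while the command id and flag are as used elsewhere in the driver, treat this as an outline rather than the exact function:

static void iwl_mvm_get_shared_mem_conf_outline(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB, /* keep the response packet */
	};

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	/* Dispatch on the API generation, as above. */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_parse_shared_mem_a000(mvm, cmd.resp_pkt);
	else
		iwl_mvm_parse_shared_mem(mvm, cmd.resp_pkt);

	iwl_free_resp(&cmd); /* caller owns the response buffer */
}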
@@ -604,16 +604,9 @@ enum iwl_mvm_tdls_cs_state {
 };
 
 struct iwl_mvm_shared_mem_cfg {
-	u32 shared_mem_addr;
-	u32 shared_mem_size;
-	u32 sample_buff_addr;
-	u32 sample_buff_size;
-	u32 txfifo_addr;
+	int num_txfifo_entries;
 	u32 txfifo_size[TX_FIFO_MAX_NUM];
 	u32 rxfifo_size[RX_FIFO_MAX_NUM];
-	u32 page_buff_addr;
-	u32 page_buff_size;
-	u32 rxfifo_addr;
-	u32 internal_txfifo_addr;
 	u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 };
@@ -1672,7 +1672,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
 	else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
 			  pkt->hdr.group_id == DATA_PATH_GROUP))
 		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
-	else
+	else if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
 }
 
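Narrowing the final else to an explicit REPLY_RX_MPDU_CMD match means an unrecognized notification arriving on an RSS queue is now dropped rather than force-parsed as an RX MPDU. The resulting dispatch shape, condensed; the FRAME_RELEASE branch is assumed from the unchanged part of the function:

	if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
	else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
			  pkt->hdr.group_id == DATA_PATH_GROUP))
		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
	else if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
	/* anything else: unknown notification on an RSS queue, ignored */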
@@ -452,10 +452,10 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
 	u16 sn = 0, index = 0;
 	bool expired = false;
 
-	spin_lock_bh(&buf->lock);
+	spin_lock(&buf->lock);
 
 	if (!buf->num_stored || buf->removed) {
-		spin_unlock_bh(&buf->lock);
+		spin_unlock(&buf->lock);
 		return;
 	}
 
@@ -492,7 +492,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
 				buf->reorder_time[index] +
 				1 + RX_REORDER_BUF_TIMEOUT_MQ);
 	}
-	spin_unlock_bh(&buf->lock);
+	spin_unlock(&buf->lock);
 }
 
 static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
@@ -503,7 +503,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
 	struct iwl_mvm_reorder_buffer *reorder_buf;
 	u8 baid = data->baid;
 
-	if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
 		return;
 
 	rcu_read_lock();
 
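Dropping the _bh suffix is safe here because a classic timer callback already runs in softirq context with bottom halves disabled; only process-context users of the same lock need spin_lock_bh() so the softirq cannot deadlock against them. A minimal illustration of the pairing, with all names invented:

#include <linux/spinlock.h>
#include <linux/timer.h>

struct reorder_example {
	spinlock_t lock;
	struct timer_list timer;
	int num_stored;
};

/* Runs in softirq context: BHs are already disabled, plain lock suffices. */
static void example_timer_expired(unsigned long data)
{
	struct reorder_example *buf = (struct reorder_example *)data;

	spin_lock(&buf->lock);
	/* ... expire stale entries ... */
	spin_unlock(&buf->lock);
}

/* Runs in process context: must keep the softirq out while holding it. */
static void example_release(struct reorder_example *buf)
{
	spin_lock_bh(&buf->lock);
	buf->num_stored = 0;
	spin_unlock_bh(&buf->lock);
}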
@@ -588,9 +588,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 			ret);
 
 	/* Make sure the SCD wrptr is correctly set before reconfiguring */
-	iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
-			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
-			     ssn, wdg_timeout);
+	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
 
 	/* Update the TID "owner" of the queue */
 	spin_lock_bh(&mvm->queue_info_lock);
@@ -1498,9 +1496,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
 		/* If DQA is supported - the queues can be disabled now */
-		if (iwl_mvm_is_dqa_supported(mvm))
+		if (iwl_mvm_is_dqa_supported(mvm)) {
+			u8 reserved_txq = mvm_sta->reserved_queue;
+			enum iwl_mvm_queue_status *status;
+
 			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 
+			/*
+			 * If no traffic has gone through the reserved TXQ - it
+			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
+			 * should be manually marked as free again
+			 */
+			spin_lock_bh(&mvm->queue_info_lock);
+			status = &mvm->queue_info[reserved_txq].status;
+			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+				 (*status != IWL_MVM_QUEUE_FREE),
+				 "sta_id %d reserved txq %d status %d",
+				 mvm_sta->sta_id, reserved_txq, *status)) {
+				spin_unlock_bh(&mvm->queue_info_lock);
+				return -EINVAL;
+			}
+
+			*status = IWL_MVM_QUEUE_FREE;
+			spin_unlock_bh(&mvm->queue_info_lock);
+		}
+
 		if (vif->type == NL80211_IFTYPE_STATION &&
 		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
 			/* if associated - we can't remove the AP STA now */
@@ -2030,11 +2050,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		baid_data->baid = baid;
 		baid_data->timeout = timeout;
 		baid_data->last_rx = jiffies;
-		init_timer(&baid_data->session_timer);
-		baid_data->session_timer.function =
-			iwl_mvm_rx_agg_session_expired;
-		baid_data->session_timer.data =
-			(unsigned long)&mvm->baid_map[baid];
+		setup_timer(&baid_data->session_timer,
+			    iwl_mvm_rx_agg_session_expired,
+			    (unsigned long)&mvm->baid_map[baid]);
 		baid_data->mvm = mvm;
 		baid_data->tid = tid;
 		baid_data->sta_id = mvm_sta->sta_id;
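setup_timer() is a drop-in consolidation: it performs init_timer() plus the .function/.data assignments in one call, with no behavioral change, and the timer is still armed separately with mod_timer(). A minimal usage sketch, names invented:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_timer;

static void example_expired(unsigned long data)
{
	/* runs in softirq context when the timer fires */
}

static void example_start(unsigned long ctx, unsigned long timeout)
{
	setup_timer(&example_timer, example_expired, ctx);
	mod_timer(&example_timer, jiffies + timeout);
}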
@@ -920,9 +920,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 			tid = IWL_MAX_TID_COUNT;
 	}
 
-	if (iwl_mvm_is_dqa_supported(mvm))
+	if (iwl_mvm_is_dqa_supported(mvm)) {
 		txq_id = mvmsta->tid_data[tid].txq_id;
 
+		if (ieee80211_is_mgmt(fc))
+			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+	}
+
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
@@ -1100,9 +1104,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can continue DELBA flow ssn = next_recl = %d\n",
 				    tid_data->next_reclaimed);
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
-				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
-				    CMD_ASYNC);
+		if (!iwl_mvm_is_dqa_supported(mvm)) {
+			u8 mac80211_ac = tid_to_mac80211_ac[tid];
+
+			iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+					    vif->hw_queue[mac80211_ac], tid,
+					    CMD_ASYNC);
+		}
 		tid_data->state = IWL_AGG_OFF;
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
@@ -1580,41 +1588,16 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
 }
 
-static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
-					  struct iwl_mvm_ba_notif *ba_notif,
-					  struct iwl_mvm_tid_data *tid_data)
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+			       int txq, int index,
+			       struct ieee80211_tx_info *ba_info, u32 rate)
 {
-	info->flags |= IEEE80211_TX_STAT_AMPDU;
-	info->status.ampdu_ack_len = ba_notif->txed_2_done;
-	info->status.ampdu_len = ba_notif->txed;
-	iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
-				    info);
-	/* TODO: not accounted if the whole A-MPDU failed */
-	info->status.tx_time = tid_data->tx_time;
-	info->status.status_driver_data[0] =
-		(void *)(uintptr_t)ba_notif->reduced_txp;
-	info->status.status_driver_data[1] =
-		(void *)(uintptr_t)tid_data->rate_n_flags;
-}
-
-void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
 	struct sk_buff_head reclaimed_skbs;
 	struct iwl_mvm_tid_data *tid_data;
 	struct ieee80211_sta *sta;
 	struct iwl_mvm_sta *mvmsta;
 	struct sk_buff *skb;
-	int sta_id, tid, freed;
-	/* "flow" corresponds to Tx queue */
-	u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-	/* "ssn" is start of block-ack Tx window, corresponds to index
-	 * (in Tx queue's circular buffer) of first TFD/frame in window */
-	u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
-
-	sta_id = ba_notif->sta_id;
-	tid = ba_notif->tid;
+	int freed;
 
 	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
 		      tid >= IWL_MAX_TID_COUNT,
@@ -1634,10 +1617,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	tid_data = &mvmsta->tid_data[tid];
 
-	if (tid_data->txq_id != scd_flow) {
+	if (tid_data->txq_id != txq) {
 		IWL_ERR(mvm,
-			"invalid BA notification: Q %d, tid %d, flow %d\n",
-			tid_data->txq_id, tid, scd_flow);
+			"invalid BA notification: Q %d, tid %d\n",
+			tid_data->txq_id, tid);
 		rcu_read_unlock();
 		return;
 	}
@@ -1651,27 +1634,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	 * block-ack window (we assume that they've been successfully
 	 * transmitted ... if not, it's too late anyway).
 	 */
-	iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
-			  &reclaimed_skbs);
+	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
 
-	IWL_DEBUG_TX_REPLY(mvm,
-			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
-			   (u8 *)&ba_notif->sta_addr_lo32,
-			   ba_notif->sta_id);
-	IWL_DEBUG_TX_REPLY(mvm,
-			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
-			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
-			   (unsigned long long)le64_to_cpu(ba_notif->bitmap),
-			   scd_flow, ba_resp_scd_ssn, ba_notif->txed,
-			   ba_notif->txed_2_done);
-
-	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
-			   ba_notif->reduced_txp);
-	tid_data->next_reclaimed = ba_resp_scd_ssn;
+	tid_data->next_reclaimed = index;
 
 	iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
 	freed = 0;
+	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
 	skb_queue_walk(&reclaimed_skbs, skb) {
 		struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1693,8 +1663,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 
 		/* this is the first skb we deliver in this batch */
 		/* put the rate scaling data there */
-		if (freed == 1)
-			iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
+		if (freed == 1) {
+			info->flags |= IEEE80211_TX_STAT_AMPDU;
+			memcpy(&info->status, &ba_info->status,
+			       sizeof(ba_info->status));
+			iwl_mvm_hwrate_to_tx_status(rate, info);
+		}
 	}
 
 	spin_unlock_bh(&mvmsta->lock);
@@ -1704,7 +1678,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	 * Still it's important to update RS about sent vs. acked.
 	 */
 	if (skb_queue_empty(&reclaimed_skbs)) {
-		struct ieee80211_tx_info ba_info = {};
 		struct ieee80211_chanctx_conf *chanctx_conf = NULL;
 
 		if (mvmsta->vif)
@@ -1714,11 +1687,11 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 		if (WARN_ON_ONCE(!chanctx_conf))
 			goto out;
 
-		ba_info.band = chanctx_conf->def.chan->band;
-		iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
+		ba_info->band = chanctx_conf->def.chan->band;
+		iwl_mvm_hwrate_to_tx_status(rate, ba_info);
 
 		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
-		iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
+		iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
 	}
 
 out:
@@ -1730,6 +1703,92 @@ out:
 	}
 }
 
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	int sta_id, tid, txq, index;
+	struct ieee80211_tx_info ba_info = {};
+	struct iwl_mvm_ba_notif *ba_notif;
+	struct iwl_mvm_tid_data *tid_data;
+	struct iwl_mvm_sta *mvmsta;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		struct iwl_mvm_compressed_ba_notif *ba_res =
+			(void *)pkt->data;
+
+		sta_id = ba_res->sta_id;
+		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+		ba_info.status.tx_time =
+			(u16)le32_to_cpu(ba_res->wireless_time);
+		ba_info.status.status_driver_data[0] =
+			(void *)(uintptr_t)ba_res->reduced_txp;
+
+		/*
+		 * TODO:
+		 * When supporting multi TID aggregations - we need to move
+		 * next_reclaimed to be per TXQ and not per TID or handle it
+		 * in a different way.
+		 * This will go together with SN and AddBA offload and cannot
+		 * be handled properly for now.
+		 */
+		WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
+		iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
+				   (int)ba_res->tfd[0].q_num,
+				   le16_to_cpu(ba_res->tfd[0].tfd_index),
+				   &ba_info, le32_to_cpu(ba_res->tx_rate));
+
+		IWL_DEBUG_TX_REPLY(mvm,
+				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+				   sta_id, le32_to_cpu(ba_res->flags),
+				   le16_to_cpu(ba_res->txed),
+				   le16_to_cpu(ba_res->done));
+		return;
+	}
+
+	ba_notif = (void *)pkt->data;
+	sta_id = ba_notif->sta_id;
+	tid = ba_notif->tid;
+	/* "flow" corresponds to Tx queue */
+	txq = le16_to_cpu(ba_notif->scd_flow);
+	/* "ssn" is start of block-ack Tx window, corresponds to index
+	 * (in Tx queue's circular buffer) of first TFD/frame in window */
+	index = le16_to_cpu(ba_notif->scd_ssn);
+
+	rcu_read_lock();
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+	if (WARN_ON_ONCE(!mvmsta)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	tid_data = &mvmsta->tid_data[tid];
+
+	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+	ba_info.status.ampdu_len = ba_notif->txed;
+	ba_info.status.tx_time = tid_data->tx_time;
+	ba_info.status.status_driver_data[0] =
+		(void *)(uintptr_t)ba_notif->reduced_txp;
+
+	rcu_read_unlock();
+
+	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+			   tid_data->rate_n_flags);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+			   (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+			   le64_to_cpu(ba_notif->bitmap), txq, index,
+			   ba_notif->txed, ba_notif->txed_2_done);
+
+	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+			   ba_notif->reduced_txp);
+}
+
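The reclaim index handed to iwl_mvm_tx_reclaim() is a position in the Tx queue's circular buffer taken from the 12-bit 802.11 sequence-number space, so "how many frames does this BA free" is modular distance, not plain subtraction. A runnable illustration; the helper name and standalone framing are this example's:

#include <stdint.h>
#include <stdio.h>

#define SN_MODULO 4096 /* 12-bit 802.11 sequence-number space */

/* Frames released when the BA window start advances from
 * next_reclaimed to the ssn/index carried by the notification. */
static unsigned int ba_frames_freed(uint16_t next_reclaimed, uint16_t ssn)
{
	return (ssn - next_reclaimed) & (SN_MODULO - 1);
}

int main(void)
{
	/* window started at 4090; the BA moves it to 6 (wraps past 4095),
	 * freeing frames 4090..4095 and 0..5 */
	printf("%u frames freed\n", ba_frames_freed(4090, 6)); /* -> 12 */
	return 0;
}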
 /*
  * Note that there are transports that buffer frames before they reach
  * the firmware. This means that after flush_tx_path is called, the