@@ -366,6 +366,8 @@ dp_rx_wds_srcport_learn(struct dp_soc *soc,
 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
 	uint32_t ret = 0;
 	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];
+	struct dp_ast_entry *ast;
+	uint16_t sa_idx;
 
 	/* Do wds source port learning only if it is a 4-address mpdu */
 	if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) &&
@@ -381,32 +383,42 @@ dp_rx_wds_srcport_learn(struct dp_soc *soc,
 				wds_src_mac,
 				CDP_TXRX_AST_TYPE_WDS,
 				flags);
+		return;
 
-	} else {
-		/*
-		 * Get the AST entry from HW SA index and mark it as active
-		 */
-		struct dp_ast_entry *ast;
-		uint16_t sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);
-		ast = soc->ast_table[sa_idx];
-
-		/*
-		 * Ensure we are updating the right AST entry by
-		 * validating ast_idx.
-		 * There is a possibility we might arrive here without
-		 * AST MAP event , so this check is mandatory
-		 */
-		if (ast && (ast->ast_idx == sa_idx)) {
-			ast->is_active = TRUE;
-		}
+	}
+
+	/*
+	 * Get the AST entry from HW SA index and mark it as active
+	 */
+	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);
 
-	if (ast && sa_sw_peer_id != ta_peer->peer_ids[0])
-		dp_peer_update_ast(soc, ta_peer, ast, flags);
+	qdf_spin_lock_bh(&soc->ast_lock);
+	ast = soc->ast_table[sa_idx];
+
+	if (!ast) {
+		qdf_spin_unlock_bh(&soc->ast_lock);
+		return;
 	}
+
+	/*
+	 * Ensure we are updating the right AST entry by
+	 * validating ast_idx.
+	 * There is a possibility we might arrive here without
+	 * AST MAP event, so this check is mandatory
+	 */
+	if (ast->ast_idx == sa_idx)
+		ast->is_active = TRUE;
+
+	/* Handle client roaming */
+	if (sa_sw_peer_id != ta_peer->peer_ids[0])
+		dp_peer_update_ast(soc, ta_peer, ast, flags);
+
+	qdf_spin_unlock_bh(&soc->ast_lock);
+
 	return;
 }
 #else
-	static inline void
+static inline void
 dp_rx_wds_srcport_learn(struct dp_soc *soc,
 		uint8_t *rx_tlv_hdr,
 		struct dp_peer *ta_peer,
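The hunk above flattens the else branch into straight-line code and, more importantly, takes soc->ast_lock around the whole lookup/validate/update sequence: the ast_table lookup, the ast_idx sanity check (needed because this path can run before the AST MAP event arrives), and the roaming update via dp_peer_update_ast() all execute under one bh-disabling spinlock; the old code did the lookup inside the else branch and called dp_peer_update_ast() with no lock held at all. A minimal standalone sketch of the pattern, with a pthread mutex standing in for the qdf spinlock and the structs reduced to the fields the pattern actually touches (mark_ast_active and both struct layouts here are hypothetical):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct ast_entry {
		uint16_t ast_idx;  /* index this entry was mapped at */
		bool is_active;
	};

	struct soc {
		pthread_mutex_t ast_lock;         /* stands in for soc->ast_lock */
		struct ast_entry *ast_table[64];  /* HW SA index -> AST entry */
	};

	/* Mark the AST entry for a hardware source-address index as active. */
	static void mark_ast_active(struct soc *soc, uint16_t sa_idx)
	{
		struct ast_entry *ast;

		pthread_mutex_lock(&soc->ast_lock);
		ast = soc->ast_table[sa_idx];

		/* Entry may not exist yet (no AST MAP event seen): bail out. */
		if (!ast) {
			pthread_mutex_unlock(&soc->ast_lock);
			return;
		}

		/* Validate the slot before trusting it. */
		if (ast->ast_idx == sa_idx)
			ast->is_active = true;

		pthread_mutex_unlock(&soc->ast_lock);
	}

Keeping the roaming update inside the same lock scope means a concurrent AST delete can no longer free the entry between the table lookup and the dereference.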
@@ -417,7 +429,7 @@ dp_rx_wds_srcport_learn(struct dp_soc *soc,
 
 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
-		qdf_nbuf_t mpdu, bool mpdu_done);
+					qdf_nbuf_t mpdu, bool mpdu_done);
 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);
 
 #define DP_RX_LIST_APPEND(head, tail, elem) \
@@ -428,19 +440,19 @@ void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tl
 			qdf_nbuf_set_next((tail), (elem)); \
 		} \
 		(tail) = (elem); \
-	qdf_nbuf_set_next((tail), NULL); \
-} while (0)
+		qdf_nbuf_set_next((tail), NULL); \
+	} while (0)
 
 #ifndef BUILD_X86
 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
-		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
+				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
 {
 	return QDF_STATUS_SUCCESS;
 }
 #else
 #define MAX_RETRY 100
 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
-		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
+				  qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
 {
 	uint32_t nbuf_retry = 0;
 	int32_t ret;
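DP_RX_LIST_APPEND, whose indentation the hunk above fixes, is the standard O(1) tail append used to chain received nbufs before handing the list up the stack: the caller keeps a (head, tail) pair, and the macro NULL-terminates the tail after every append so the list is always well formed. A self-contained sketch of the same head/tail idiom, with a plain next-pointer node and a hypothetical set_next() standing in for qdf_nbuf_set_next():

	#include <stddef.h>

	struct buf {
		struct buf *next;
		int id;
	};

	static void set_next(struct buf *b, struct buf *n) { b->next = n; }

	#define LIST_APPEND(head, tail, elem) \
		do { \
			if (!(head)) { \
				(head) = (elem); \
			} else { \
				set_next((tail), (elem)); \
			} \
			(tail) = (elem); \
			set_next((tail), NULL); \
		} while (0)

	/* Typical caller: accumulate frames in rx order, then hand off. */
	static struct buf *collect(struct buf bufs[], int n)
	{
		struct buf *head = NULL, *tail = NULL;
		int i;

		for (i = 0; i < n; i++)
			LIST_APPEND(head, tail, &bufs[i]);
		return head; /* tail->next is already NULL */
	}

Terminating the tail on every append costs one extra store, but it means the caller can hand the list off at any point without a final NULL-termination pass.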
@@ -455,13 +467,13 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
 		return QDF_STATUS_SUCCESS;
 	else {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-			"phy addr %pK exceded 0x50000000 trying again\n",
-			paddr);
+			  "phy addr %pK exceeded 0x50000000 trying again\n",
+			  paddr);
 
 		nbuf_retry++;
 		if ((*rx_netbuf)) {
 			qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
-					QDF_DMA_BIDIRECTIONAL);
+					      QDF_DMA_BIDIRECTIONAL);
 			/* Not freeing buffer intentionally.
 			 * Observed that same buffer is getting
 			 * re-allocated resulting in longer load time
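For context on the hunk above: on x86 builds, check_x86_paddr() retries the map of any RX buffer whose DMA address lands at or above 0x50000000, up to MAX_RETRY (100) times, and the rejected buffer is unmapped but deliberately not freed (per the in-code comment, freeing tends to hand the same buffer back, lengthening load time). A sketch of that bounded retry shape, with hypothetical buf_alloc()/dma_map()/dma_unmap() stand-ins for the qdf nbuf calls; only the control flow is meant to match the driver:

	#include <stdint.h>
	#include <stddef.h>

	#define MAX_RETRY   100
	#define PADDR_LIMIT 0x50000000ULL

	/* Hypothetical stand-ins for the qdf nbuf alloc/map calls. */
	static void *buf_alloc(void)       { static uint8_t b[2048]; return b; }
	static uint64_t dma_map(void *buf) { return (uint64_t)(uintptr_t)buf; }
	static void dma_unmap(void *buf)   { (void)buf; }

	/* Remap until the bus address lands below the limit, or give up. */
	static int get_low_paddr(void **buf, uint64_t *paddr)
	{
		uint32_t nbuf_retry = 0;

		while (nbuf_retry < MAX_RETRY) {
			if (*paddr < PADDR_LIMIT)
				return 0;  /* usable address */

			nbuf_retry++;
			if (*buf)
				dma_unmap(*buf);
			/*
			 * Old buffer intentionally not freed (see the comment
			 * in the hunk above); allocate and map a fresh one.
			 */
			*buf = buf_alloc();
			if (!*buf)
				return -1;
			*paddr = dma_map(*buf);
		}
		return -1;  /* retries exhausted */
	}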