qcacmn: Access peer and its parameters only if peer_id changes

1. Avoid accessing CFG parameters in the per-packet path.
2. Access peer, vdev and pdev only when the current peer_id
   differs from the previous peer_id (a minimal sketch of this
   caching pattern follows the commit metadata below).
3. Avoid HKv1 and HKv2 checks in the WDS learning path.

Change-Id: Id7fb7a29c642480f8ca58738dbb0c92130e44ecd
Author:       Tallapragada Kalyan
Date:         2021-11-28 23:54:05 +05:30
Committed by: Madan Koyyalamudi
Parent:       f3c393d735
Commit:       ab28074b13

3 changed files with 98 additions and 74 deletions
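Before the diffs, here is a minimal, self-contained C sketch of the pattern this change applies: configuration values are read once per reap cycle instead of once per packet, and the peer/vdev/pdev chain is re-derived only when a packet's peer_id differs from the previous packet's. All struct and function names in the sketch (struct pkt, peer_lookup(), cfg_get_max_ast_idx(), rx_process()) are hypothetical stand-ins, not the actual dp_rx types or APIs.

/*
 * Hypothetical sketch of the caching pattern: hoist config reads out of
 * the per-packet loop and refresh peer/vdev/pdev only on peer_id change.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct pdev { uint32_t delay_stats_flag; };
struct vdev { struct pdev *pdev; };
struct peer { uint16_t peer_id; struct vdev *vdev; };
struct pkt  { uint16_t peer_id; };

/* stand-ins for the lookups the real code tries not to repeat per packet */
static struct pdev g_pdev = { .delay_stats_flag = 1 };
static struct vdev g_vdev = { .pdev = &g_pdev };
static struct peer g_peers[] = {
	{ .peer_id = 1, .vdev = &g_vdev },
	{ .peer_id = 2, .vdev = &g_vdev },
};

static struct peer *peer_lookup(uint16_t peer_id)
{
	for (size_t i = 0; i < sizeof(g_peers) / sizeof(g_peers[0]); i++)
		if (g_peers[i].peer_id == peer_id)
			return &g_peers[i];
	return NULL;
}

/* pretend config read that is too expensive for the per-packet path */
static uint32_t cfg_get_max_ast_idx(void) { return 1024; }

static void rx_process(const struct pkt *pkts, size_t n)
{
	/* read config once per reap cycle instead of once per packet */
	uint32_t max_ast = cfg_get_max_ast_idx();

	struct peer *peer = NULL;
	struct vdev *vdev = NULL;
	struct pdev *rx_pdev = NULL;
	uint32_t dsf = 0;

	for (size_t i = 0; i < n; i++) {
		uint16_t peer_id = pkts[i].peer_id;

		/* refresh peer/vdev/pdev only when peer_id changes */
		if (!peer || peer->peer_id != peer_id) {
			peer = peer_lookup(peer_id);
			if (!peer || !peer->vdev) {
				/* stands in for the no-peer / invalid-vdev drop path */
				peer = NULL;
				continue;
			}
			vdev = peer->vdev;
			rx_pdev = vdev->pdev;
			dsf = rx_pdev->delay_stats_flag;
		}

		printf("pkt %zu: peer_id=%u max_ast=%u dsf=%u\n",
		       i, (unsigned)peer_id, (unsigned)max_ast, (unsigned)dsf);
	}
}

int main(void)
{
	struct pkt pkts[] = { {1}, {1}, {2}, {2}, {1} };

	rx_process(pkts, sizeof(pkts) / sizeof(pkts[0]));
	return 0;
}

In the real RX path the same idea appears as dp_rx_get_peer_and_vdev() being invoked only when peer_id changes, and the wlan_cfg_* reads (peer_ext_stats, max_ast) being hoisted above the reap loop, as the diffs below show.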

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016-2022 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the

@@ -2342,4 +2342,45 @@ qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
 				 rx_desc_pool->buf_alignment, FALSE);
 }
 #endif
+
+static inline
+struct dp_peer *dp_rx_get_peer_and_vdev(struct dp_soc *soc,
+					qdf_nbuf_t nbuf,
+					uint16_t peer_id,
+					bool pkt_capture_offload,
+					struct dp_vdev **vdev,
+					struct dp_pdev **rx_pdev,
+					uint32_t *dsf,
+					uint32_t *old_tid)
+{
+	struct dp_peer *peer = NULL;
+
+	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX);
+
+	if (qdf_likely(peer)) {
+		*vdev = peer->vdev;
+	} else {
+		nbuf->next = NULL;
+		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
+						     pkt_capture_offload);
+		if (!pkt_capture_offload)
+			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
+
+		goto end;
+	}
+
+	if (qdf_unlikely(!(*vdev))) {
+		qdf_nbuf_free(nbuf);
+		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
+		goto end;
+	}
+
+	*rx_pdev = (*vdev)->pdev;
+	*dsf = (*rx_pdev)->delay_stats_flag;
+	*old_tid = 0xff;
+
+end:
+	return peer;
+}
+
 #endif /* _DP_RX_H */

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the

@@ -157,31 +157,10 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
 	struct dp_ast_entry *ast;
 	uint32_t flags = DP_AST_FLAGS_HM;
 	uint32_t ret = 0;
-	struct dp_pdev *pdev = ta_peer->vdev->pdev;
 	uint8_t wds_src_mac[QDF_MAC_ADDR_SIZE];

-	/* For AP mode : Do wds source port learning only if it is a
-	 * 4-address mpdu
-	 *
-	 * For STA mode : Frames from RootAP backend will be in 3-address mode,
-	 * till RootAP does the WDS source port learning; Hence in repeater/STA
-	 * mode, we enable learning even in 3-address mode , to avoid RootAP
-	 * backbone getting wrongly learnt as MEC on repeater
-	 */
-	if (ta_peer->vdev->opmode != wlan_op_mode_sta) {
-		if (!(is_chfrag_start && is_ad4_valid))
-			return;
-	} else {
-		/* For HKv2 Source port learing is not needed in STA mode
-		 * as we have support in HW
-		 *
-		 * if sa_valid bit is set there is a AST entry added on AP VAP
-		 * and this peer has roamed behind ROOT AP in this case proceed
-		 * further to check for roaming
-		 */
-		if (soc->ast_override_support && !is_sa_valid)
-			return;
-	}
+	if (!(is_chfrag_start && is_ad4_valid))
+		return;

 	if (qdf_unlikely(!is_sa_valid)) {
 		qdf_mem_copy(wds_src_mac,

@@ -234,20 +213,14 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
 			 * smart monitor is enabled and send add_ast command
 			 * to FW.
 			 */
-			dp_monitor_neighbour_peer_add_ast(pdev, ta_peer,
+			dp_monitor_neighbour_peer_add_ast(ta_peer->vdev->pdev,
+							  ta_peer,
 							  wds_src_mac, nbuf,
 							  flags);
 			return;
 		}
 	}

-	if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
-	    (ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
-		qdf_spin_unlock_bh(&soc->ast_lock);
-		return;
-	}
-
 	/*
 	 * Ensure we are updating the right AST entry by
 	 * validating ast_idx.

@@ -257,7 +230,12 @@ dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
 	if (ast->is_mapped && (ast->ast_idx == sa_idx))
 		ast->is_active = TRUE;

-	if (sa_sw_peer_id != ta_peer->peer_id) {
+	if (ast->peer_id != ta_peer->peer_id) {
+		if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
+		    (ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
+			qdf_spin_unlock_bh(&soc->ast_lock);
+			return;
+		}
+
 		if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
 		    (ast->type != CDP_TXRX_AST_TYPE_SELF) &&

@@ -43,13 +43,12 @@
 #include "dp_rx_buffer_pool.h"

 static inline
-bool is_sa_da_idx_valid(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
+bool is_sa_da_idx_valid(uint32_t max_ast,
 			qdf_nbuf_t nbuf, struct hal_rx_msdu_metadata msdu_info)
 {
-	if ((qdf_nbuf_is_sa_valid(nbuf) &&
-	     (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
+	if ((qdf_nbuf_is_sa_valid(nbuf) && (msdu_info.sa_idx > max_ast)) ||
 	    (!qdf_nbuf_is_da_mcbc(nbuf) && qdf_nbuf_is_da_valid(nbuf) &&
-	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
+	     (msdu_info.da_idx > max_ast)))
 		return false;

 	return true;

@@ -156,13 +155,10 @@ dp_rx_intrabss_fwd_li(struct dp_soc *soc,
 		      struct dp_peer *ta_peer,
 		      uint8_t *rx_tlv_hdr,
 		      qdf_nbuf_t nbuf,
-		      struct hal_rx_msdu_metadata msdu_metadata)
+		      struct hal_rx_msdu_metadata msdu_metadata,
+		      struct cdp_tid_rx_stats *tid_stats)
 {
 	uint8_t tx_vdev_id;
-	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
-	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
-	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
-					tid_stats.tid_rx_stats[ring_id][tid];

 	/* if it is a broadcast pkt (eg: ARP) and it is not its own
 	 * source, then clone the pkt and send the cloned pkt for

@@ -252,6 +248,10 @@ uint32_t dp_rx_process_li(struct dp_intr *int_ctx,
 	qdf_nbuf_t ebuf_tail;
 	uint8_t pkt_capture_offload = 0;
 	int max_reap_limit;
+	uint32_t old_tid;
+	uint32_t peer_ext_stats;
+	uint32_t dsf;
+	uint32_t max_ast;

 	DP_HIST_INIT();

@@ -282,6 +282,12 @@ more_data:
 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
 	qdf_mem_zero(head, sizeof(head));
 	qdf_mem_zero(tail, sizeof(tail));
+	old_tid = 0xff;
+	dsf = 0;
+	peer_ext_stats = 0;
+	max_ast = 0;
+	rx_pdev = NULL;
+	tid_stats = NULL;

 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
 		/*

@@ -305,6 +311,8 @@ more_data:
 	last_prefetched_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
 						       num_pending);

+	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
+	max_ast = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
+
 	/*
 	 * start reaping the buffers from reo ring and queue
 	 * them in per vdev queue.

@@ -607,12 +615,27 @@ done:
 		}

 		if (qdf_unlikely(!peer)) {
-			peer = dp_peer_get_ref_by_id(soc, peer_id,
-						     DP_MOD_ID_RX);
+			peer = dp_rx_get_peer_and_vdev(soc, nbuf, peer_id,
+						       pkt_capture_offload,
+						       &vdev,
+						       &rx_pdev, &dsf,
+						       &old_tid);
+			if (qdf_unlikely(!peer) || qdf_unlikely(!vdev)) {
+				nbuf = next;
+				continue;
+			}
 		} else if (peer && peer->peer_id != peer_id) {
 			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
-			peer = dp_peer_get_ref_by_id(soc, peer_id,
-						     DP_MOD_ID_RX);
+
+			peer = dp_rx_get_peer_and_vdev(soc, nbuf, peer_id,
+						       pkt_capture_offload,
+						       &vdev,
+						       &rx_pdev, &dsf,
+						       &old_tid);
+			if (qdf_unlikely(!peer) || qdf_unlikely(!vdev)) {
+				nbuf = next;
+				continue;
+			}
 		}

 		if (peer) {

@@ -625,25 +648,6 @@ done:
 		rx_bufs_used++;

-		if (qdf_likely(peer)) {
-			vdev = peer->vdev;
-		} else {
-			nbuf->next = NULL;
-			dp_rx_deliver_to_pkt_capture_no_peer(
-					soc, nbuf, pkt_capture_offload);
-			if (!pkt_capture_offload)
-				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
-			nbuf = next;
-			continue;
-		}
-
-		if (qdf_unlikely(!vdev)) {
-			qdf_nbuf_free(nbuf);
-			nbuf = next;
-			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
-			continue;
-		}
-
 		/* when hlos tid override is enabled, save tid in
 		 * skb->priority
 		 */

@@ -651,16 +655,16 @@ done:
 				  DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
 			qdf_nbuf_set_priority(nbuf, tid);

-		rx_pdev = vdev->pdev;
 		DP_RX_TID_SAVE(nbuf, tid);
-		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
-		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
-				 soc->wlan_cfg_ctx)) ||
+		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
 		    dp_rx_pkt_tracepoints_enabled())
 			qdf_nbuf_set_timestamp(nbuf);

-		tid_stats =
-			&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
+		if (qdf_likely(old_tid != tid)) {
+			tid_stats =
+				&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
+			old_tid = tid;
+		}

 		/*
 		 * Check if DMA completed -- msdu_done is the last bit

@@ -850,7 +854,7 @@ done:
 			 * Drop the packet if sa_idx and da_idx OOB or
 			 * sa_sw_peerid is 0
 			 */
-			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
+			if (!is_sa_da_idx_valid(max_ast, nbuf,
 						msdu_metadata)) {
 				qdf_nbuf_free(nbuf);
 				nbuf = next;

@@ -880,7 +884,8 @@ done:
 		if (dp_rx_check_ap_bridge(vdev))
 			if (dp_rx_intrabss_fwd_li(soc, peer, rx_tlv_hdr,
 						  nbuf,
-						  msdu_metadata)) {
+						  msdu_metadata,
+						  tid_stats)) {
 				nbuf = next;
 				tid_stats->intrabss_cnt++;
 				continue; /* Get next desc */