qcacmn: Handle nawds case of intra-bss forwarding

Handle nawds case of intra-bss forwarding of
multicast packets on Beryllium.

Change-Id: I05ec4d937b4b97aa2c9fb325fb2b6a197968ea2a
CRs-Fixed: 3103402
Author:     Pavankumar Nandeshwar
Date:       2022-02-03 10:05:45 -08:00
Committer:  Madan Koyyalamudi
Parent:     8aa367acfa
Commit:     223693e614

9 changed files with 207 additions and 107 deletions
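Overview of the change: the common RX multicast intra-BSS forwarding path gains a per-architecture hook, dp_rx_intrabss_handle_nawds, which is tried before the generic dp_tx_send() fan-out. On Beryllium the hook re-injects the copied frame through dp_tx_send_exception() with the new is_intrabss_fwd bit and the transmitting peer's id; dp_tx_send_exception() routes such frames to dp_tx_nawds_handler(), which clones the frame to every NAWDS peer except the one it arrived from. On Lithium the hook is a stub returning false. Below is a minimal, self-contained C sketch of that control flow; every name in it (tx_exc_meta, tx_send_exception, rx_handle_nawds) is an illustrative stand-in, not a qcacmn API.

/* Minimal stand-alone sketch of the new control flow. All types and
 * function names below are simplified stand-ins, not qcacmn code. */
#include <stdbool.h>
#include <stdio.h>

struct tx_exc_meta {
	int peer_id;          /* stand-in for cdp_tx_exception_metadata.peer_id */
	bool is_intrabss_fwd; /* stand-in for the new bitfield */
};

/* Stand-in for dp_tx_send_exception(): an is_intrabss_fwd frame on a
 * NAWDS vdev goes to the NAWDS fan-out instead of the normal checks. */
static int tx_send_exception(struct tx_exc_meta *meta, bool nawds_vdev)
{
	if (meta->is_intrabss_fwd && nawds_vdev)
		printf("fan out clones to NAWDS peers, skipping ta_peer %d\n",
		       meta->peer_id);
	return 0; /* 0 = consumed, like the real API returning NULL */
}

/* Stand-in for the Beryllium hook dp_rx_intrabss_handle_nawds_be(). */
static bool rx_handle_nawds(bool nawds_enabled, int ta_peer_id)
{
	struct tx_exc_meta meta = {
		.peer_id = ta_peer_id,
		.is_intrabss_fwd = true,
	};

	if (!nawds_enabled)
		return false;           /* copy not consumed */
	tx_send_exception(&meta, true); /* real code frees the copy on error */
	return true;                    /* copy consumed */
}

int main(void)
{
	/* As in dp_rx_intrabss_mcbc_fwd(): try the NAWDS hook first,
	 * else fall back to the generic fan-out. */
	if (!rx_handle_nawds(false, 7))
		printf("fallback: generic dp_tx_send() fan-out\n");
	rx_handle_nawds(true, 7);
	return 0;
}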

dp/inc/cdp_txrx_cmn_struct.h

@@ -641,7 +641,8 @@ struct cdp_tx_exception_metadata {
 	uint8_t tid;
 	uint16_t tx_encap_type;
 	enum cdp_sec_type sec_type;
-	uint8_t is_tx_sniffer;
+	uint8_t is_tx_sniffer :1,
+		is_intrabss_fwd :1;
 	uint16_t ppdu_cookie;
 #ifdef QCA_SUPPORT_WDS_EXTENDED
 	uint8_t is_wds_extended;

dp/wifi3.0/be/dp_be.c

@@ -1589,6 +1589,7 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
 	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_be;
 	arch_ops->dp_rx_desc_cookie_2_va =
 					dp_rx_desc_cookie_2_va_be;
+	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_be;
 	arch_ops->txrx_soc_attach = dp_soc_attach_be;
 	arch_ops->txrx_soc_detach = dp_soc_detach_be;

dp/wifi3.0/be/dp_be_rx.c

@@ -21,6 +21,7 @@
 #include "hal_hw_headers.h"
 #include "dp_types.h"
 #include "dp_rx.h"
+#include "dp_tx.h"
 #include "dp_be_rx.h"
 #include "dp_peer.h"
 #include "hal_rx.h"
@@ -1373,6 +1374,48 @@ rel_da_peer:
 #endif /* WLAN_MLO_MULTI_CHIP */
 #endif /* INTRA_BSS_FWD_OFFLOAD */
 
+/*
+ * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
+ * @soc: core txrx main context
+ * @ta_txrx_peer: source txrx_peer entry
+ * @nbuf_copy: nbuf that has to be intrabss forwarded
+ * @tid_stats: tid_stats structure
+ *
+ * Return: true if it is forwarded else false
+ */
+bool
+dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
+			       struct dp_txrx_peer *ta_txrx_peer,
+			       qdf_nbuf_t nbuf_copy,
+			       struct cdp_tid_rx_stats *tid_stats)
+{
+	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
+		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
+		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);
+
+		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
+		tx_exc_metadata.is_intrabss_fwd = 1;
+		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
+
+		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
+					 ta_txrx_peer->vdev->vdev_id,
+					 nbuf_copy,
+					 &tx_exc_metadata)) {
+			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
+						      rx.intra_bss.fail, 1,
+						      len);
+			tid_stats->fail_cnt[INTRABSS_DROP]++;
+			qdf_nbuf_free(nbuf_copy);
+		} else {
+			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
+						      rx.intra_bss.pkts, 1,
+						      len);
+			tid_stats->intrabss_cnt++;
+		}
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
  * pkt with DA not equal to vdev mac addr, fwd is not allowed.

dp/wifi3.0/be/dp_be_rx.h

@@ -56,6 +56,20 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
 			   struct hal_rx_msdu_metadata msdu_metadata);
 #endif
 
+/*
+ * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
+ * @soc: core txrx main context
+ * @ta_txrx_peer: source txrx_peer entry
+ * @nbuf_copy: nbuf that has to be intrabss forwarded
+ * @tid_stats: tid_stats structure
+ *
+ * Return: true if it is forwarded else false
+ */
+bool
+dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
+			       qdf_nbuf_t nbuf_copy,
+			       struct cdp_tid_rx_stats *tid_stats);
+
 uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
 			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
 			  uint32_t quota);

dp/wifi3.0/dp_rx.c

@@ -875,6 +875,10 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
 
+	if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
+						      tid_stats))
+		return false;
+
 	if (dp_tx_send((struct cdp_soc_t *)soc,
 		       ta_peer->vdev->vdev_id, nbuf_copy)) {
 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,

dp/wifi3.0/dp_tx.c

@@ -2737,6 +2737,9 @@ static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
 	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
 			       tx_exc->ppdu_cookie == 0);
 
+	if (tx_exc->is_intrabss_fwd)
+		return true;
+
 	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
 	    invalid_cookie) {
 		return false;
@@ -2828,6 +2831,77 @@ dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
 	return QDF_STATUS_SUCCESS;
 }
 
+/**
+ * dp_tx_nawds_handler() - NAWDS handler
+ *
+ * @soc: DP soc handle
+ * @vdev: DP vdev handle
+ * @msdu_info: msdu_info required to create HTT metadata
+ * @nbuf: skb
+ * @sa_peer_id: peer id of the source (transmitting) peer, used to avoid
+ *		echoing a forwarded multicast frame back to that peer
+ *
+ * This API transfers the multicast frames with the peer id
+ * on NAWDS enabled peer.
+ *
+ * Return: none
+ */
+static inline
+void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
+			 struct dp_tx_msdu_info_s *msdu_info,
+			 qdf_nbuf_t nbuf, uint16_t sa_peer_id)
+{
+	struct dp_peer *peer = NULL;
+	qdf_nbuf_t nbuf_clone = NULL;
+	uint16_t peer_id = DP_INVALID_PEER;
+	struct dp_txrx_peer *txrx_peer;
+
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
+
+	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+		txrx_peer = dp_get_txrx_peer(peer);
+		if (!txrx_peer)
+			continue;
+
+		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
+			peer_id = peer->peer_id;
+
+			/* Multicast packets need to be
+			 * dropped in case of intra-bss forwarding
+			 */
+			if (sa_peer_id == peer->peer_id) {
+				dp_tx_debug("multicast packet");
+				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
+							  tx.nawds_mcast_drop,
+							  1);
+				continue;
+			}
+
+			nbuf_clone = qdf_nbuf_clone(nbuf);
+			if (!nbuf_clone) {
+				QDF_TRACE(QDF_MODULE_ID_DP,
+					  QDF_TRACE_LEVEL_ERROR,
+					  FL("nbuf clone failed"));
+				break;
+			}
+
+			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
+							    msdu_info, peer_id,
+							    NULL);
+			if (nbuf_clone) {
+				dp_tx_debug("pkt send failed");
+				qdf_nbuf_free(nbuf_clone);
+			} else {
+				if (peer_id != DP_INVALID_PEER)
+					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
+								      tx.nawds_mcast,
+								      1, qdf_nbuf_len(nbuf));
+			}
+		}
+	}
+
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
+}
+
 /**
  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  * @soc: DP soc handle
@@ -2935,20 +3009,36 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 	 */
 	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 
-	/*
-	 * Check exception descriptors
-	 */
-	if (dp_tx_exception_limit_check(vdev))
-		goto fail;
+	if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
+		if (qdf_unlikely(vdev->nawds_enabled)) {
+			/*
+			 * This is a multicast packet
+			 */
+			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
+					    tx_exc_metadata->peer_id);
+			DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
+					 1, qdf_nbuf_len(nbuf));
+		}
 
-	/* Single linear frame */
-	/*
-	 * If nbuf is a simple linear frame, use send_single function to
-	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
-	 * SRNG. There is no need to setup a MSDU extension descriptor.
-	 */
-	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
-			tx_exc_metadata->peer_id, tx_exc_metadata);
+		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
+					      DP_INVALID_PEER, NULL);
+	} else {
+		/*
+		 * Check exception descriptors
+		 */
+		if (dp_tx_exception_limit_check(vdev))
+			goto fail;
+
+		/* Single linear frame */
+		/*
+		 * If nbuf is a simple linear frame, use send_single function
+		 * to prepare direct-buffer type TCL descriptor and enqueue to
+		 * TCL SRNG. There is no need to setup a MSDU extension
+		 * descriptor.
+		 */
+		nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
+					      tx_exc_metadata->peer_id,
+					      tx_exc_metadata);
+	}
 
 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
 	return nbuf;
@@ -3099,97 +3189,6 @@ qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
 #endif
 
-/**
- * dp_tx_nawds_handler() - NAWDS handler
- *
- * @soc: DP soc handle
- * @vdev_id: id of DP vdev handle
- * @msdu_info: msdu_info required to create HTT metadata
- * @nbuf: skb
- *
- * This API transfers the multicast frames with the peer id
- * on NAWDS enabled peer.
- * Return: none
- */
-static inline
-void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
-			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
-{
-	struct dp_peer *peer = NULL;
-	qdf_nbuf_t nbuf_clone = NULL;
-	uint16_t peer_id = DP_INVALID_PEER;
-	uint16_t sa_peer_id = DP_INVALID_PEER;
-	struct dp_ast_entry *ast_entry = NULL;
-	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
-	struct dp_txrx_peer *txrx_peer;
-
-	if (!soc->ast_offload_support) {
-		if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
-			qdf_spin_lock_bh(&soc->ast_lock);
-			ast_entry = dp_peer_ast_hash_find_by_pdevid
-				(soc,
-				 (uint8_t *)(eh->ether_shost),
-				 vdev->pdev->pdev_id);
-			if (ast_entry)
-				sa_peer_id = ast_entry->peer_id;
-			qdf_spin_unlock_bh(&soc->ast_lock);
-		}
-	} else {
-		if ((qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) &&
-		    qdf_nbuf_get_tx_fctx(nbuf))
-			sa_peer_id = *(uint32_t *)qdf_nbuf_get_tx_fctx(nbuf);
-	}
-
-	qdf_spin_lock_bh(&vdev->peer_list_lock);
-
-	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		txrx_peer = dp_get_txrx_peer(peer);
-		if (!txrx_peer)
-			continue;
-
-		if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
-			peer_id = peer->peer_id;
-
-			/* Multicast packets needs to be
-			 * dropped in case of intra bss forwarding
-			 */
-			if (sa_peer_id == peer->peer_id) {
-				dp_tx_debug("multicast packet");
-				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
-							  tx.nawds_mcast_drop,
-							  1);
-				continue;
-			}
-
-			nbuf_clone = qdf_nbuf_clone(nbuf);
-			if (!nbuf_clone) {
-				QDF_TRACE(QDF_MODULE_ID_DP,
-					  QDF_TRACE_LEVEL_ERROR,
-					  FL("nbuf clone failed"));
-				break;
-			}
-
-			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
-							    msdu_info, peer_id,
-							    NULL);
-			if (nbuf_clone) {
-				dp_tx_debug("pkt send failed");
-				qdf_nbuf_free(nbuf_clone);
-			} else {
-				if (peer_id != DP_INVALID_PEER) {
-					DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
-								      tx.nawds_mcast,
-								      1, qdf_nbuf_len(nbuf));
-				}
-			}
-		}
-	}
-
-	qdf_spin_unlock_bh(&vdev->peer_list_lock);
-}
-
 #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
 static inline
 void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
@@ -3350,8 +3349,27 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 	if (qdf_unlikely(vdev->nawds_enabled)) {
 		qdf_ether_header_t *eh = (qdf_ether_header_t *)
 					  qdf_nbuf_data(nbuf);
-		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
-			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
+		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
+			uint16_t sa_peer_id = DP_INVALID_PEER;
+
+			if (!soc->ast_offload_support &&
+			    qdf_nbuf_get_tx_ftype(nbuf) ==
+			    CB_FTYPE_INTRABSS_FWD) {
+				struct dp_ast_entry *ast_entry = NULL;
+
+				qdf_spin_lock_bh(&soc->ast_lock);
+				ast_entry = dp_peer_ast_hash_find_by_pdevid
+					(soc,
+					 (uint8_t *)(eh->ether_shost),
+					 vdev->pdev->pdev_id);
+				if (ast_entry)
+					sa_peer_id = ast_entry->peer_id;
+				qdf_spin_unlock_bh(&soc->ast_lock);
+			}
+
+			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
+					    sa_peer_id);
+		}
 
 		peer_id = DP_INVALID_PEER;
 		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,

dp/wifi3.0/dp_types.h

@@ -1728,6 +1728,12 @@ struct dp_arch_ops {
 					       void *ring_desc,
 					       struct dp_rx_desc **r_rx_desc);
 
+	bool
+	(*dp_rx_intrabss_handle_nawds)(struct dp_soc *soc,
+				       struct dp_txrx_peer *ta_txrx_peer,
+				       qdf_nbuf_t nbuf_copy,
+				       struct cdp_tid_rx_stats *tid_stats);
+
 	struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
 						     uint32_t cookie);
 	uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,

dp/wifi3.0/li/dp_li.c

@@ -527,6 +527,14 @@ static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
 	return QDF_STATUS_SUCCESS;
 }
 
+bool
+dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
+			       qdf_nbuf_t nbuf_copy,
+			       struct cdp_tid_rx_stats *tid_stats)
+{
+	return false;
+}
+
 void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
 {
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
@@ -564,6 +572,7 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
 	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
 	arch_ops->dp_rx_desc_cookie_2_va =
 					dp_rx_desc_cookie_2_va_li;
+	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_li;
 	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
 	arch_ops->dp_rx_peer_metadata_peer_id_get =
 					dp_rx_peer_metadata_peer_id_get_li;

dp/wifi3.0/li/dp_li_rx.h

@@ -102,6 +102,10 @@ dp_rx_peer_metadata_peer_id_get_li(struct dp_soc *soc, uint32_t peer_metadata)
 	return metadata->peer_id;
 }
 
+bool
+dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
+			       qdf_nbuf_t nbuf_copy,
+			       struct cdp_tid_rx_stats *tid_stats);
+
 #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
 static inline
 void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)
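For context on the arch-ops indirection used throughout this change, here is a minimal stand-alone sketch of the same registration-and-dispatch pattern. Everything in it (struct arch_ops, handle_nawds_be/_li, init_ops) is a hypothetical stand-in, not qcacmn code; it only mirrors how dp_initialize_arch_ops_be()/_li() fill a function-pointer table that common code such as dp_rx_intrabss_mcbc_fwd() calls without knowing the target generation.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of struct dp_arch_ops: one slot per per-target hook. */
struct arch_ops {
	bool (*handle_nawds)(int ta_peer_id);
};

/* Beryllium-style hook: consumes the frame copy. */
static bool handle_nawds_be(int ta_peer_id)
{
	printf("BE: exception-path NAWDS forward for ta_peer %d\n", ta_peer_id);
	return true;
}

/* Lithium-style stub: never consumes, so common code falls through. */
static bool handle_nawds_li(int ta_peer_id)
{
	(void)ta_peer_id;
	return false;
}

/* Mirrors dp_initialize_arch_ops_be()/_li() registration. */
static void init_ops(struct arch_ops *ops, bool beryllium)
{
	ops->handle_nawds = beryllium ? handle_nawds_be : handle_nawds_li;
}

int main(void)
{
	struct arch_ops ops;

	init_ops(&ops, true);
	if (!ops.handle_nawds(42)) /* common code never checks the target */
		printf("generic fan-out\n");

	init_ops(&ops, false);
	if (!ops.handle_nawds(42))
		printf("LI: generic dp_tx_send() fan-out\n");
	return 0;
}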