qcacmn: Avoid using ast_entry in intra_bss handling
Avoid using ast_entry in intra_bss handling; instead, use da_peer_id directly from the msdu_metadata.

Change-Id: Ic586f297e8e393504d7d399cff7a67c3035aa11f
committed by Madan Koyyalamudi
parent a6902b1d58
commit 146d67af95
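For orientation, below is a minimal, self-contained C sketch of the idea this change implements. It is not the driver code: the sketch_* structs and SKETCH_INVALID_PEER are invented stand-ins for hal_rx_msdu_metadata, dp_peer and HTT_INVALID_PEER, and the real peer refcounting and isolation-list checks are reduced to plain field tests. The point it illustrates is exactly what the commit message states: msdu_metadata.da_idx is treated as the DA peer id itself, so no soc->ast_table[] lookup is needed. It compiles with any C99 compiler (e.g. cc sketch.c && ./a.out).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_INVALID_PEER 0xffff      /* stand-in for HTT_INVALID_PEER */

struct sketch_msdu_metadata {
        uint16_t da_idx;                /* now carries the DA peer id directly */
};

struct sketch_peer {
        uint16_t peer_id;
        uint8_t vdev_id;
        bool bss_peer;
};

/* Old approach (removed by this change): da_idx indexed soc->ast_table[] and
 * the peer id came from the AST entry.  New approach: da_idx IS the DA peer
 * id, so the unicast check needs no AST entry at all. */
static bool sketch_ucast_check(const struct sketch_msdu_metadata *meta,
                               const struct sketch_peer *ta_peer,
                               const struct sketch_peer *da_peer)
{
        uint16_t da_peer_id = meta->da_idx;

        if (da_peer_id == SKETCH_INVALID_PEER ||
            da_peer_id == ta_peer->peer_id)
                return false;
        if (!da_peer || da_peer->peer_id != da_peer_id)
                return false;
        if (da_peer->bss_peer || da_peer->vdev_id != ta_peer->vdev_id)
                return false;
        return true;
}

int main(void)
{
        struct sketch_msdu_metadata meta = { .da_idx = 7 };
        struct sketch_peer ta = { .peer_id = 3, .vdev_id = 0, .bss_peer = false };
        struct sketch_peer da = { .peer_id = 7, .vdev_id = 0, .bss_peer = false };

        printf("forward intra-BSS: %d\n",
               (int)sketch_ucast_check(&meta, &ta, &da));
        return 0;
}
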
@@ -20,7 +20,6 @@
 #include "hal_hw_headers.h"
 #include "dp_types.h"
 #include "dp_rx.h"
-#include "dp_tx.h"
 #include "dp_be_rx.h"
 #include "dp_peer.h"
 #include "hal_rx.h"
@@ -1057,16 +1056,62 @@ uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
 #endif
 
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
-#if defined(QCA_WIFI_WCN7850) || !defined(INTRA_BSS_FW_OFFLOAD)
-bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_peer *ta_peer,
-                           uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
-                           struct hal_rx_msdu_metadata msdu_metadata)
+#ifdef INTRA_BSS_FW_OFFLOAD
+static bool
+dp_rx_intrabss_ucast_check_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
+                              struct dp_peer *ta_peer,
+                              struct hal_rx_msdu_metadata *msdu_metadata)
 {
-        /* Hamilton V1 uses Lithium path */
-        return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf,
-                                  msdu_metadata);
+        return qdf_nbuf_is_intra_bss(nbuf);
 }
 #else
+static bool
+dp_rx_intrabss_ucast_check_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
+                              struct dp_peer *ta_peer,
+                              struct hal_rx_msdu_metadata *msdu_metadata)
+{
+        uint16_t da_peer_id;
+        struct dp_peer *da_peer;
+
+        if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
+                return false;
+
+        /* The field da_idx here holds DA peer id
+         */
+        da_peer_id = msdu_metadata->da_idx;
+
+        /* TA peer cannot be same as peer(DA) on which AST is present
+         * this indicates a change in topology and that AST entries
+         * are yet to be updated.
+         */
+        if ((da_peer_id == ta_peer->peer_id) ||
+            (da_peer_id == HTT_INVALID_PEER))
+                return false;
+
+        da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
+                                        DP_MOD_ID_RX);
+        if (!da_peer)
+                return false;
+
+        /* If the source or destination peer in the isolation
+         * list then dont forward instead push to bridge stack.
+         */
+        if (dp_get_peer_isolation(ta_peer) ||
+            dp_get_peer_isolation(da_peer) ||
+            (da_peer->vdev->vdev_id != ta_peer->vdev->vdev_id)) {
+                dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+                return false;
+        }
+
+        if (da_peer->bss_peer) {
+                dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+                return false;
+        }
+
+        dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+        return true;
+}
+#endif
 /*
  * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
  * pkt with DA not equal to vdev mac addr, fwd is not allowed.
@@ -1082,8 +1127,6 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_peer *ta_peer,
                            uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                            struct hal_rx_msdu_metadata msdu_metadata)
 {
-        uint16_t len;
-        qdf_nbuf_t nbuf_copy;
         uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
         uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
         struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
@@ -1097,80 +1140,14 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_peer *ta_peer,
         * like igmpsnoop decide whether to forward or not with
         * Mcast enhancement.
         */
-        if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
-                if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
-                                                    nbuf))
-                        return true;
-
-                if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
-                        return false;
-
-                /* If the source peer in the isolation list
-                 * then dont forward instead push to bridge stack
-                 */
-                if (dp_get_peer_isolation(ta_peer))
-                        return false;
-
-                nbuf_copy = qdf_nbuf_copy(nbuf);
-                if (!nbuf_copy)
-                        return false;
-
-                len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-                if (dp_tx_send((struct cdp_soc_t *)soc,
-                               ta_peer->vdev->vdev_id, nbuf_copy)) {
-                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
-                        tid_stats->fail_cnt[INTRABSS_DROP]++;
-                        qdf_nbuf_free(nbuf_copy);
-                } else {
-                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
-                        tid_stats->intrabss_cnt++;
-                }
-                return false;
-        }
-
-        if (qdf_nbuf_is_intra_bss(nbuf)) {
-                if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
-                                                    nbuf))
-                        return true;
-
-                len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-
-                /* linearize the nbuf just before we send to
-                 * dp_tx_send()
-                 */
-                if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
-                        if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
-                                return false;
-
-                        nbuf = qdf_nbuf_unshare(nbuf);
-                        if (!nbuf) {
-                                DP_STATS_INC_PKT(ta_peer,
-                                                 rx.intra_bss.fail, 1, len);
-                                /* return true even though the pkt is
-                                 * not forwarded. Basically skb_unshare
-                                 * failed and we want to continue with
-                                 * next nbuf.
-                                 */
-                                tid_stats->fail_cnt[INTRABSS_DROP]++;
-                                return true;
-                        }
-                }
-
-                if (!dp_tx_send((struct cdp_soc_t *)soc,
-                                ta_peer->vdev->vdev_id, nbuf)) {
-                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
-                                         len);
-                } else {
-                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
-                                         len);
-                        tid_stats->fail_cnt[INTRABSS_DROP]++;
-                        return false;
-                }
-
-                return true;
-        }
+        if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer)
+                return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
+                                               nbuf, tid_stats);
+
+        if (dp_rx_intrabss_ucast_check_be(soc, nbuf, ta_peer, &msdu_metadata))
+                return dp_rx_intrabss_ucast_fwd(soc, ta_peer, rx_tlv_hdr,
+                                                nbuf, tid_stats);
+
         return false;
 }
 #endif
-#endif
 
@@ -501,7 +501,6 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
 }
 
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 
 #ifndef FEATURE_WDS
 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
                     struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
@@ -510,171 +509,111 @@ void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
 #endif
 
 /*
- * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
+ * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
  *
  * @soc: core txrx main context
  * @ta_peer    : source peer entry
  * @rx_tlv_hdr : start address of rx tlvs
  * @nbuf       : nbuf that has to be intrabss forwarded
+ * @tid_stats  : tid stats pointer
  *
  * Return: bool: true if it is forwarded else false
  */
-bool
-dp_rx_intrabss_fwd(struct dp_soc *soc,
-                   struct dp_peer *ta_peer,
-                   uint8_t *rx_tlv_hdr,
-                   qdf_nbuf_t nbuf,
-                   struct hal_rx_msdu_metadata msdu_metadata)
+bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+                             uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
+                             struct cdp_tid_rx_stats *tid_stats)
 {
         uint16_t len;
-        uint8_t is_frag;
-        uint16_t da_peer_id = HTT_INVALID_PEER;
-        struct dp_peer *da_peer = NULL;
-        bool is_da_bss_peer = false;
-        struct dp_ast_entry *ast_entry;
         qdf_nbuf_t nbuf_copy;
-        uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
-        uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
-        struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
-                                tid_stats.tid_rx_stats[ring_id][tid];
 
-        /* check if the destination peer is available in peer table
-         * and also check if the source peer and destination peer
-         * belong to the same vap and destination peer is not bss peer.
+        if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
+                                            nbuf))
+                return true;
+
+        if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
+                return false;
+
+        /* If the source peer in the isolation list
+         * then dont forward instead push to bridge stack
          */
-        if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
-                if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
-                                                    nbuf))
-                        return true;
+        if (dp_get_peer_isolation(ta_peer))
+                return false;
 
-                ast_entry = soc->ast_table[msdu_metadata.da_idx];
-                if (!ast_entry)
-                        return false;
+        nbuf_copy = qdf_nbuf_copy(nbuf);
+        if (!nbuf_copy)
+                return false;
 
-                if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
-                        ast_entry->is_active = TRUE;
-                        return false;
-                }
-
-                da_peer_id = ast_entry->peer_id;
-
-                if (da_peer_id == HTT_INVALID_PEER)
-                        return false;
-                /* TA peer cannot be same as peer(DA) on which AST is present
-                 * this indicates a change in topology and that AST entries
-                 * are yet to be updated.
-                 */
-                if (da_peer_id == ta_peer->peer_id)
-                        return false;
-
-                if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
-                        return false;
-
-                da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
-                                                DP_MOD_ID_RX);
-                if (!da_peer)
-                        return false;
-
-                /* If the source or destination peer in the isolation
-                 * list then dont forward instead push to bridge stack.
-                 */
-                if (dp_get_peer_isolation(ta_peer) ||
-                    dp_get_peer_isolation(da_peer)) {
-                        dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
-                        return false;
-                }
-
-                is_da_bss_peer = da_peer->bss_peer;
-                dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
-
-                if (!is_da_bss_peer) {
-                        len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-                        is_frag = qdf_nbuf_is_frag(nbuf);
-                        memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
-
-                        /* linearize the nbuf just before we send to
-                         * dp_tx_send()
-                         */
-                        if (qdf_unlikely(is_frag)) {
-                                if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
-                                        return false;
-
-                                nbuf = qdf_nbuf_unshare(nbuf);
-                                if (!nbuf) {
-                                        DP_STATS_INC_PKT(ta_peer,
-                                                         rx.intra_bss.fail,
-                                                         1,
-                                                         len);
-                                        /* return true even though the pkt is
-                                         * not forwarded. Basically skb_unshare
-                                         * failed and we want to continue with
-                                         * next nbuf.
-                                         */
-                                        tid_stats->fail_cnt[INTRABSS_DROP]++;
-                                        return true;
-                                }
-                        }
-
-                        if (!dp_tx_send((struct cdp_soc_t *)soc,
-                                        ta_peer->vdev->vdev_id, nbuf)) {
-                                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
-                                                 len);
-                                return true;
-                        } else {
-                                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
-                                                 len);
-                                tid_stats->fail_cnt[INTRABSS_DROP]++;
-                                return false;
-                        }
-                }
+        len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
+        if (dp_tx_send((struct cdp_soc_t *)soc,
+                       ta_peer->vdev->vdev_id, nbuf_copy)) {
+                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
+                tid_stats->fail_cnt[INTRABSS_DROP]++;
+                qdf_nbuf_free(nbuf_copy);
+        } else {
+                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
+                tid_stats->intrabss_cnt++;
         }
-        /* if it is a broadcast pkt (eg: ARP) and it is not its own
-         * source, then clone the pkt and send the cloned pkt for
-         * intra BSS forwarding and original pkt up the network stack
-         * Note: how do we handle multicast pkts. do we forward
-         * all multicast pkts as is or let a higher layer module
-         * like igmpsnoop decide whether to forward or not with
-         * Mcast enhancement.
-         */
-        else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
-                               !ta_peer->bss_peer))) {
-                if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
-                                                    nbuf))
-                        return true;
-
-                if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
-                        goto end;
-
-                /* If the source peer in the isolation list
-                 * then dont forward instead push to bridge stack
-                 */
-                if (dp_get_peer_isolation(ta_peer))
-                        goto end;
-
-                nbuf_copy = qdf_nbuf_copy(nbuf);
-                if (!nbuf_copy)
-                        goto end;
-
-                len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-                if (dp_tx_send((struct cdp_soc_t *)soc,
-                               ta_peer->vdev->vdev_id, nbuf_copy)) {
-                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
-                        tid_stats->fail_cnt[INTRABSS_DROP]++;
-                        qdf_nbuf_free(nbuf_copy);
-                } else {
-                        DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
-                        tid_stats->intrabss_cnt++;
-                }
-        }
-
-end:
-        /* return false as we have to still send the original pkt
-         * up the stack
-         */
         return false;
 }
 
+/*
+ * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
+ *
+ * @soc: core txrx main context
+ * @ta_peer    : source peer entry
+ * @rx_tlv_hdr : start address of rx tlvs
+ * @nbuf       : nbuf that has to be intrabss forwarded
+ * @tid_stats  : tid stats pointer
+ *
+ * Return: bool: true if it is forwarded else false
+ */
+bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+                              uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
+                              struct cdp_tid_rx_stats *tid_stats)
+{
+        uint16_t len;
+
+        if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
+                                            nbuf))
+                return true;
+
+        len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
+
+        /* linearize the nbuf just before we send to
+         * dp_tx_send()
+         */
+        if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
+                if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
+                        return false;
+
+                nbuf = qdf_nbuf_unshare(nbuf);
+                if (!nbuf) {
+                        DP_STATS_INC_PKT(ta_peer,
+                                         rx.intra_bss.fail, 1, len);
+                        /* return true even though the pkt is
+                         * not forwarded. Basically skb_unshare
+                         * failed and we want to continue with
+                         * next nbuf.
+                         */
+                        tid_stats->fail_cnt[INTRABSS_DROP]++;
+                        return false;
+                }
+        }
+
+        if (!dp_tx_send((struct cdp_soc_t *)soc,
+                        ta_peer->vdev->vdev_id, nbuf)) {
+                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
+                                 len);
+        } else {
+                DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
+                                 len);
+                tid_stats->fail_cnt[INTRABSS_DROP]++;
+                return false;
+        }
+
+        return true;
+}
+
 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
 
 #ifdef MESH_MODE_SUPPORT
@@ -1079,24 +1079,6 @@ void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
 }
 
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
-/*
- * dp_rx_intrabss_fwd() - API for intrabss fwd. For EAPOL
- * pkt with DA not equal to vdev mac addr, fwd is not allowed.
- * @soc: core txrx main context
- * @ta_peer: source peer entry
- * @rx_tlv_hdr: start address of rx tlvs
- * @nbuf: nbuf that has to be intrabss forwarded
- * @msdu_metadata: msdu metadata
- *
- * Return: true if it is forwarded else false
- */
-
-bool dp_rx_intrabss_fwd(struct dp_soc *soc,
-                        struct dp_peer *ta_peer,
-                        uint8_t *rx_tlv_hdr,
-                        qdf_nbuf_t nbuf,
-                        struct hal_rx_msdu_metadata msdu_metadata);
-
 #ifdef DISABLE_EAPOL_INTRABSS_FWD
 #ifdef WLAN_FEATURE_11BE_MLO
 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,

@@ -1159,6 +1141,15 @@ bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
         return false;
 }
 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
 
+bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+                             uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
+                             struct cdp_tid_rx_stats *tid_stats);
+
+bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+                              uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
+                              struct cdp_tid_rx_stats *tid_stats);
+
 /**
  * dp_rx_defrag_concat() - Concatenate the fragments
  *
@@ -83,6 +83,103 @@ static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
 #endif
 #endif
 
+#ifndef QCA_HOST_MODE_WIFI_DISABLE
+static bool
+dp_rx_intrabss_ucast_check_li(struct dp_soc *soc, qdf_nbuf_t nbuf,
+                              struct dp_peer *ta_peer,
+                              struct hal_rx_msdu_metadata *msdu_metadata)
+{
+        uint16_t da_peer_id;
+        struct dp_peer *da_peer;
+        struct dp_ast_entry *ast_entry;
+
+        if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
+                return false;
+
+        ast_entry = soc->ast_table[msdu_metadata->da_idx];
+        if (!ast_entry)
+                return false;
+
+        if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
+                ast_entry->is_active = TRUE;
+                return false;
+        }
+
+        da_peer_id = ast_entry->peer_id;
+        /* TA peer cannot be same as peer(DA) on which AST is present
+         * this indicates a change in topology and that AST entries
+         * are yet to be updated.
+         */
+        if ((da_peer_id == ta_peer->peer_id) ||
+            (da_peer_id == HTT_INVALID_PEER))
+                return false;
+
+        da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
+                                        DP_MOD_ID_RX);
+        if (!da_peer)
+                return false;
+
+        /* If the source or destination peer in the isolation
+         * list then dont forward instead push to bridge stack.
+         */
+        if (dp_get_peer_isolation(ta_peer) ||
+            dp_get_peer_isolation(da_peer) ||
+            (da_peer->vdev->vdev_id != ta_peer->vdev->vdev_id)) {
+                dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+                return false;
+        }
+
+        if (da_peer->bss_peer) {
+                dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+                return false;
+        }
+
+        dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
+        return true;
+}
+
+/*
+ * dp_rx_intrabss_fwd_li() - Implements the Intra-BSS forwarding logic
+ *
+ * @soc: core txrx main context
+ * @ta_peer    : source peer entry
+ * @rx_tlv_hdr : start address of rx tlvs
+ * @nbuf       : nbuf that has to be intrabss forwarded
+ *
+ * Return: bool: true if it is forwarded else false
+ */
+static bool
+dp_rx_intrabss_fwd_li(struct dp_soc *soc,
+                      struct dp_peer *ta_peer,
+                      uint8_t *rx_tlv_hdr,
+                      qdf_nbuf_t nbuf,
+                      struct hal_rx_msdu_metadata msdu_metadata)
+{
+        uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
+        uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
+        struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
+                                tid_stats.tid_rx_stats[ring_id][tid];
+
+        /* if it is a broadcast pkt (eg: ARP) and it is not its own
+         * source, then clone the pkt and send the cloned pkt for
+         * intra BSS forwarding and original pkt up the network stack
+         * Note: how do we handle multicast pkts. do we forward
+         * all multicast pkts as is or let a higher layer module
+         * like igmpsnoop decide whether to forward or not with
+         * Mcast enhancement.
+         */
+        if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer)
+                return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
+                                               nbuf, tid_stats);
+
+        if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_peer, &msdu_metadata))
+                return dp_rx_intrabss_ucast_fwd(soc, ta_peer, rx_tlv_hdr,
+                                                nbuf, tid_stats);
+
+        return false;
+}
+#endif
+
 /**
  * dp_rx_process_li() - Brain of the Rx processing functionality
  *  Called from the bottom half (tasklet/NET_RX_SOFTIRQ)

@@ -756,8 +853,9 @@ done:
 
                 /* Intrabss-fwd */
                 if (dp_rx_check_ap_bridge(vdev))
-                        if (dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr,
-                                               nbuf, msdu_metadata)) {
+                        if (dp_rx_intrabss_fwd_li(soc, peer, rx_tlv_hdr,
+                                                  nbuf,
+                                                  msdu_metadata)) {
                                 nbuf = next;
                                 tid_stats->intrabss_cnt++;
                                 continue; /* Get next desc */
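As a closing note, the refactor converges both RX paths on the same shape: an architecture-specific unicast check (AST walk on Lithium, direct da_idx peer id on Beryllium) feeding the two shared helpers dp_rx_intrabss_mcbc_fwd() and dp_rx_intrabss_ucast_fwd(). The sketch below is an illustrative, self-contained C program that only mirrors that dispatch shape; every sketch_* name is invented for illustration and none of it is the driver code.

#include <stdbool.h>
#include <stdio.h>

typedef bool (*sketch_ucast_check_fn)(void *nbuf);

/* Shared helpers: mcast/bcast forwards a copy (the original still goes up
 * the stack, hence false), unicast forwards the original buffer itself. */
static bool sketch_mcbc_fwd(void *nbuf)
{
        (void)nbuf;
        puts("mcbc: clone and transmit the copy");
        return false;
}

static bool sketch_ucast_fwd(void *nbuf)
{
        (void)nbuf;
        puts("ucast: transmit the original nbuf");
        return true;
}

/* Common dispatch; the per-architecture difference is isolated in the
 * ucast_check callback. */
static bool sketch_intrabss_fwd(void *nbuf, bool da_mcbc, bool ta_is_bss,
                                sketch_ucast_check_fn ucast_check)
{
        if (da_mcbc && !ta_is_bss)
                return sketch_mcbc_fwd(nbuf);

        if (ucast_check(nbuf))
                return sketch_ucast_fwd(nbuf);

        return false;
}

/* Stand-in for the Beryllium check: pretend da_idx resolved to a valid,
 * non-BSS peer on the same vdev. */
static bool sketch_check_be(void *nbuf)
{
        (void)nbuf;
        return true;
}

int main(void)
{
        printf("forwarded: %d\n",
               (int)sketch_intrabss_fwd(NULL, false, false, sketch_check_be));
        return 0;
}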