qcacmn: Avoid using ast_entry in intra_bss handling

Avoid using ast_entry in intra_bss handling, instead use
da_peer_id directly from the msdu_metadata

Change-Id: Ic586f297e8e393504d7d399cff7a67c3035aa11f
Pavankumar Nandeshwar
2021-10-26 04:25:38 -07:00
Committed by: Madan Koyyalamudi
parent a6902b1d58
commit 146d67af95
4 changed files with 254 additions and 249 deletions
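
Concretely, the removed path resolved the destination (DA) peer by indexing the SoC AST table with msdu_metadata.da_idx and then validating the resulting ast_entry; the new path is handed the destination peer id directly. A minimal before/after sketch in C; the da_peer_id metadata field name is an assumption taken from the commit message above, not from the hunks below:

	/* Before: DA peer id recovered through the AST table */
	struct dp_ast_entry *ast_entry = soc->ast_table[msdu_metadata.da_idx];
	uint16_t da_peer_id = ast_entry ? ast_entry->peer_id : HTT_INVALID_PEER;

	/* After: DA peer id taken directly from the rx msdu metadata,
	 * so forwarding no longer depends on AST entries being current
	 * (field name assumed from the commit message)
	 */
	uint16_t da_peer_id = msdu_metadata.da_peer_id;

This also drops the AST-specific guards (the CDP_TXRX_AST_TYPE_DA handling and the stale-topology peer_id/vdev_id checks) from the hot rx path, as the hunks below show.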

@@ -501,7 +501,6 @@ dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
 }
 
-
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 #ifndef FEATURE_WDS
 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
 		    struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
@@ -510,171 +509,111 @@ void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
 #endif
 
 /*
- * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
+ * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
  *
  * @soc: core txrx main context
  * @ta_peer	: source peer entry
  * @rx_tlv_hdr	: start address of rx tlvs
  * @nbuf	: nbuf that has to be intrabss forwarded
+ * @tid_stats	: tid stats pointer
  *
  * Return: bool: true if it is forwarded else false
  */
-bool
-dp_rx_intrabss_fwd(struct dp_soc *soc,
-		   struct dp_peer *ta_peer,
-		   uint8_t *rx_tlv_hdr,
-		   qdf_nbuf_t nbuf,
-		   struct hal_rx_msdu_metadata msdu_metadata)
+bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
+			     struct cdp_tid_rx_stats *tid_stats)
 {
 	uint16_t len;
-	uint8_t is_frag;
-	uint16_t da_peer_id = HTT_INVALID_PEER;
-	struct dp_peer *da_peer = NULL;
-	bool is_da_bss_peer = false;
-	struct dp_ast_entry *ast_entry;
 	qdf_nbuf_t nbuf_copy;
-	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
-	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
-	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
-					tid_stats.tid_rx_stats[ring_id][tid];
 
-	/* check if the destination peer is available in peer table
-	 * and also check if the source peer and destination peer
-	 * belong to the same vap and destination peer is not bss peer.
-	 */
-	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
-		if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
-						    nbuf))
-			return true;
+	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
+					    nbuf))
+		return true;
 
-		ast_entry = soc->ast_table[msdu_metadata.da_idx];
-		if (!ast_entry)
-			return false;
+	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
+		return false;
 
-		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
-			ast_entry->is_active = TRUE;
-			return false;
-		}
+	/* If the source peer in the isolation list
+	 * then dont forward instead push to bridge stack
+	 */
+	if (dp_get_peer_isolation(ta_peer))
+		return false;
 
-		da_peer_id = ast_entry->peer_id;
-		if (da_peer_id == HTT_INVALID_PEER)
-			return false;
+	nbuf_copy = qdf_nbuf_copy(nbuf);
+	if (!nbuf_copy)
+		return false;
 
-		/* TA peer cannot be same as peer(DA) on which AST is present
-		 * this indicates a change in topology and that AST entries
-		 * are yet to be updated.
-		 */
-		if (da_peer_id == ta_peer->peer_id)
-			return false;
-
-		if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
-			return false;
-
-		da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
-						DP_MOD_ID_RX);
-		if (!da_peer)
-			return false;
-
-		/* If the source or destination peer in the isolation
-		 * list then dont forward instead push to bridge stack.
-		 */
-		if (dp_get_peer_isolation(ta_peer) ||
-		    dp_get_peer_isolation(da_peer)) {
-			dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
-			return false;
-		}
-
-		is_da_bss_peer = da_peer->bss_peer;
-		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
-
-		if (!is_da_bss_peer) {
-			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-			is_frag = qdf_nbuf_is_frag(nbuf);
-			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
-
-			/* linearize the nbuf just before we send to
-			 * dp_tx_send()
-			 */
-			if (qdf_unlikely(is_frag)) {
-				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
-					return false;
-
-				nbuf = qdf_nbuf_unshare(nbuf);
-				if (!nbuf) {
-					DP_STATS_INC_PKT(ta_peer,
-							 rx.intra_bss.fail,
-							 1,
-							 len);
-					/* return true even though the pkt is
-					 * not forwarded. Basically skb_unshare
-					 * failed and we want to continue with
-					 * next nbuf.
-					 */
-					tid_stats->fail_cnt[INTRABSS_DROP]++;
-					return true;
-				}
-			}
-
-			if (!dp_tx_send((struct cdp_soc_t *)soc,
-					ta_peer->vdev->vdev_id, nbuf)) {
-				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
-						 len);
-				return true;
-			} else {
-				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
-						 len);
-				tid_stats->fail_cnt[INTRABSS_DROP]++;
-				return false;
-			}
-		}
-	}
+	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
+	if (dp_tx_send((struct cdp_soc_t *)soc,
+		       ta_peer->vdev->vdev_id, nbuf_copy)) {
+		DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
+		tid_stats->fail_cnt[INTRABSS_DROP]++;
+		qdf_nbuf_free(nbuf_copy);
+	} else {
+		DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
+		tid_stats->intrabss_cnt++;
+	}
+
-	/* if it is a broadcast pkt (eg: ARP) and it is not its own
-	 * source, then clone the pkt and send the cloned pkt for
-	 * intra BSS forwarding and original pkt up the network stack
-	 * Note: how do we handle multicast pkts. do we forward
-	 * all multicast pkts as is or let a higher layer module
-	 * like igmpsnoop decide whether to forward or not with
-	 * Mcast enhancement.
-	 */
-	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
-			       !ta_peer->bss_peer))) {
-		if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
-						    nbuf))
-			return true;
-
-		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
-			goto end;
-
-		/* If the source peer in the isolation list
-		 * then dont forward instead push to bridge stack
-		 */
-		if (dp_get_peer_isolation(ta_peer))
-			goto end;
-
-		nbuf_copy = qdf_nbuf_copy(nbuf);
-		if (!nbuf_copy)
-			goto end;
-
-		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-		if (dp_tx_send((struct cdp_soc_t *)soc,
-			       ta_peer->vdev->vdev_id, nbuf_copy)) {
-			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
-			tid_stats->fail_cnt[INTRABSS_DROP]++;
-			qdf_nbuf_free(nbuf_copy);
-		} else {
-			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
-			tid_stats->intrabss_cnt++;
-		}
-	}
-
-end:
 	/* return false as we have to still send the original pkt
 	 * up the stack
 	 */
 	return false;
 }
+
+/*
+ * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
+ *
+ * @soc: core txrx main context
+ * @ta_peer	: source peer entry
+ * @rx_tlv_hdr	: start address of rx tlvs
+ * @nbuf	: nbuf that has to be intrabss forwarded
+ * @tid_stats	: tid stats pointer
+ *
+ * Return: bool: true if it is forwarded else false
+ */
+bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
+			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
+			      struct cdp_tid_rx_stats *tid_stats)
+{
+	uint16_t len;
+
+	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
+					    nbuf))
+		return true;
+
+	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
+
+	/* linearize the nbuf just before we send to
+	 * dp_tx_send()
+	 */
+	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
+		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
+			return false;
+
+		nbuf = qdf_nbuf_unshare(nbuf);
+		if (!nbuf) {
+			DP_STATS_INC_PKT(ta_peer,
+					 rx.intra_bss.fail, 1, len);
+			/* return true even though the pkt is
+			 * not forwarded. Basically skb_unshare
+			 * failed and we want to continue with
+			 * next nbuf.
+			 */
+			tid_stats->fail_cnt[INTRABSS_DROP]++;
+			return false;
+		}
+	}
+
+	if (!dp_tx_send((struct cdp_soc_t *)soc,
+			ta_peer->vdev->vdev_id, nbuf)) {
+		DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
+				 len);
+	} else {
+		DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
+				 len);
+		tid_stats->fail_cnt[INTRABSS_DROP]++;
+		return false;
+	}
+
+	return true;
+}
 
 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
 
 #ifdef MESH_MODE_SUPPORT
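
With the single dp_rx_intrabss_fwd() entry point removed, callers now pick one of the two helpers per packet. A dispatch sketch mirroring the branch conditions of the removed function; the wrapper below is hypothetical, and the actual callers live in the per-arch rx files among the 4 files this commit touches:

	/* Hypothetical caller-side dispatch between the two new helpers,
	 * reusing the DA classification the old combined function used.
	 */
	static bool dp_rx_intrabss_fwd_dispatch(struct dp_soc *soc,
						struct dp_peer *ta_peer,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf,
						struct cdp_tid_rx_stats *tid_stats)
	{
		/* mcast/bcast from a non-bss peer: forward a cloned copy and
		 * still deliver the original up the stack (returns false)
		 */
		if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer)
			return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
						       nbuf, tid_stats);

		/* valid unicast DA: hand the original nbuf to the tx path */
		if (qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))
			return dp_rx_intrabss_ucast_fwd(soc, ta_peer, rx_tlv_hdr,
							nbuf, tid_stats);

		return false;
	}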