qcacmn: Add support for Rx MLO link stats for ML peer

Add support to update per-packet MLO link stats for ML peers in the
Rx packet path.

Change-Id: Ica25993126b4ce49f9e36b7b290d9887e4885155
CRs-Fixed: 3397721
Author: Kenvish Butani (2023-01-12 15:03:39 +05:30)
Committed by: Madan Koyyalamudi
Parent: 3243b9e4f4
Commit: 7fdd918e7f
8 changed files with 153 additions and 90 deletions
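
The hunks below thread a new link_id argument through the Rx per-packet path so the stats macros can charge each counter to the MLO link on which the MSDU actually arrived; the legacy Lithium (li) paths pass a literal 0. The DP_PEER_*_STATS macro definitions themselves are outside this diff, but conceptually the change amounts to selecting a per-link stats block before incrementing a field. The sketch below is a minimal illustration of that idea only; MAX_MLO_LINKS, PEER_PER_PKT_STATS_INC_PKT and the *_sketch structs are hypothetical names, not the real qcacmn definitions.

#include <stdint.h>

#define MAX_MLO_LINKS 3	/* assumption for illustration only */

struct pkt_info_sketch {
	uint32_t num;	/* packet count */
	uint64_t bytes;	/* byte count   */
};

struct rx_stats_sketch {
	struct pkt_info_sketch raw;		/* raw-decap MSDUs             */
	struct pkt_info_sketch to_stack_twt;	/* delivered while TWT active  */
};

struct txrx_peer_sketch {
	/* one Rx stats block per MLO link; non-MLO peers only use index 0 */
	struct rx_stats_sketch rx[MAX_MLO_LINKS];
};

/* Charge 'nr' packets totalling 'len' bytes to 'field' on the given link. */
#define PEER_PER_PKT_STATS_INC_PKT(peer, field, nr, len, link_id)	\
	do {								\
		(peer)->rx[(link_id)].field.num   += (nr);		\
		(peer)->rx[(link_id)].field.bytes += (len);		\
	} while (0)

/* Usage mirroring the call sites in this diff:
 *   BE Rx path (link known):  PEER_PER_PKT_STATS_INC_PKT(peer, raw, 1, msdu_len, link_id);
 *   Lithium Rx path (no MLO): PEER_PER_PKT_STATS_INC_PKT(peer, raw, 1, msdu_len, 0);
 */

In the actual patch the existing call sites simply gain a trailing link_id argument, e.g. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1, msdu_len, link_id) on BE targets and the same call with 0 on Lithium targets.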


@@ -214,6 +214,7 @@ uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
uint32_t peer_ext_stats;
uint32_t dsf;
uint32_t l3_pad;
uint8_t link_id = 0;
DP_HIST_INIT();
@@ -651,7 +652,8 @@ done:
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.raw, 1,
msdu_len);
msdu_len,
link_id);
} else {
DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
@@ -677,7 +679,8 @@ done:
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_rx_err("%pK: Policy Check Drop pkt", soc);
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.policy_check_drop, 1);
rx.policy_check_drop,
1, link_id);
tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
/* Drop & free packet */
dp_rx_nbuf_free(nbuf);
@@ -698,7 +701,7 @@ done:
if (!is_eapol) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.peer_unauth_rx_pkt_drop,
1);
1, link_id);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -717,7 +720,8 @@ done:
tid) == false) {
DP_PEER_PER_PKT_STATS_INC
(txrx_peer,
rx.multipass_rx_pkt_drop, 1);
rx.multipass_rx_pkt_drop,
1, link_id);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -731,7 +735,7 @@ done:
tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.nawds_mcast_drop,
1);
1, link_id);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -775,7 +779,7 @@ done:
}
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
reo_ring_num, tid_stats);
reo_ring_num, tid_stats, link_id);
if (qdf_likely(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet) &&
@@ -784,7 +788,9 @@ done:
if (dp_rx_check_ap_bridge(vdev))
if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
rx_tlv_hdr,
nbuf)) {
nbuf,
msdu_metadata,
link_id)) {
nbuf = next;
tid_stats->intrabss_cnt++;
continue; /* Get next desc */
@@ -811,7 +817,8 @@ done:
if (qdf_unlikely(txrx_peer->in_twt))
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.to_stack_twt, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
QDF_NBUF_CB_RX_PKT_LEN(nbuf),
link_id);
tid_stats->delivered_to_stack++;
nbuf = next;
@@ -1121,7 +1128,8 @@ static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
struct dp_vdev *vdev,
struct dp_txrx_peer *peer,
qdf_nbuf_t nbuf)
qdf_nbuf_t nbuf,
uint8_t link_id)
{
struct dp_vdev *mcast_primary_vdev = NULL;
struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
@@ -1137,13 +1145,15 @@ bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
if (qdf_unlikely(vdev->multipass_en)) {
if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
DP_PEER_PER_PKT_STATS_INC(peer,
rx.multipass_rx_pkt_drop, 1);
rx.multipass_rx_pkt_drop,
1, link_id);
return false;
}
}
if (!peer->bss_peer) {
if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf,
tid_stats, link_id))
dp_rx_err("forwarding failed");
}
@@ -1204,7 +1214,8 @@ send_pkt:
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
struct dp_vdev *vdev,
struct dp_txrx_peer *peer,
qdf_nbuf_t nbuf)
qdf_nbuf_t nbuf,
uint8_t link_id)
{
return false;
}
@@ -1577,7 +1588,8 @@ bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats)
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id)
{
if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
struct cdp_tx_exception_metadata tx_exc_metadata = {0};
@@ -1593,13 +1605,13 @@ dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
&tx_exc_metadata)) {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
rx.intra_bss.fail, 1,
len);
len, link_id);
tid_stats->fail_cnt[INTRABSS_DROP]++;
qdf_nbuf_free(nbuf_copy);
} else {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
rx.intra_bss.pkts, 1,
len);
len, link_id);
tid_stats->intrabss_cnt++;
}
return true;
@@ -1613,7 +1625,9 @@ dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
}
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct hal_rx_msdu_metadata msdu_metadata,
uint8_t link_id)
{
uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
@@ -1633,7 +1647,7 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
*/
if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
nbuf, tid_stats);
nbuf, tid_stats, link_id);
}
if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
@@ -1646,7 +1660,8 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
&msdu_metadata, &params)) {
ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
params.tx_vdev_id,
rx_tlv_hdr, nbuf, tid_stats);
rx_tlv_hdr, nbuf, tid_stats,
link_id);
}
return ret;


@@ -47,13 +47,17 @@ struct dp_be_intrabss_params {
* @ta_txrx_peer: source peer entry
* @rx_tlv_hdr: start address of rx tlvs
* @nbuf: nbuf that has to be intrabss forwarded
* @msdu_metadata: msdu metadata
* @link_id: link id on which the packet is received
*
* Return: true if it is forwarded else false
*/
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer,
uint8_t *rx_tlv_hdr,
qdf_nbuf_t nbuf);
qdf_nbuf_t nbuf,
struct hal_rx_msdu_metadata msdu_metadata,
uint8_t link_id);
#endif
/**
@@ -62,6 +66,7 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
* @ta_txrx_peer: source txrx_peer entry
* @nbuf_copy: nbuf that has to be intrabss forwarded
* @tid_stats: tid_stats structure
* @link_id: link id on which the packet is received
*
* Return: true if it is forwarded else false
*/
@@ -69,7 +74,8 @@ bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats);
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id);
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
uint32_t *msg_word,
@@ -281,13 +287,15 @@ dp_soc_get_num_soc_be(struct dp_soc *soc)
* @vdev: DP vdev handle
* @peer: DP peer handle
* @nbuf: nbuf to be enqueued
* @link_id: link id on which the packet is received
*
* Return: true when packet sent to stack, false failure
*/
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
struct dp_vdev *vdev,
struct dp_txrx_peer *peer,
qdf_nbuf_t nbuf);
qdf_nbuf_t nbuf,
uint8_t link_id);
/**
* dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW


@@ -1106,7 +1106,8 @@ void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct cdp_tid_rx_stats *tid_stats)
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id)
{
uint16_t len;
qdf_nbuf_t nbuf_copy;
@@ -1115,7 +1116,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
nbuf))
return true;
if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
return false;
/* If the source peer in the isolation list
@@ -1135,7 +1136,8 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
nbuf_copy,
tid_stats))
tid_stats,
link_id))
return false;
/* Don't send packets if tx is paused */
@@ -1143,11 +1145,11 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
!dp_tx_send((struct cdp_soc_t *)soc,
ta_peer->vdev->vdev_id, nbuf_copy)) {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
len);
len, link_id);
tid_stats->intrabss_cnt++;
} else {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
len);
len, link_id);
tid_stats->fail_cnt[INTRABSS_DROP]++;
dp_rx_nbuf_free(nbuf_copy);
}
@@ -1157,7 +1159,8 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
uint8_t tx_vdev_id,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct cdp_tid_rx_stats *tid_stats)
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id)
{
uint16_t len;
@@ -1174,7 +1177,7 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
if (!nbuf) {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
rx.intra_bss.fail,
1, len);
1, len, link_id);
/* return true even though the pkt is
* not forwarded. Basically skb_unshare
* failed and we want to continue with
@@ -1192,10 +1195,10 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
tx_vdev_id, nbuf)) {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
len);
len, link_id);
} else {
DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
len);
len, link_id);
tid_stats->fail_cnt[INTRABSS_DROP]++;
return false;
}
@@ -2297,6 +2300,7 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
* @nss: Number of Spatial Streams
* @bw: BandWidth
* @pkt_type: Corresponds to preamble
* @link_id: Link Id on which packet is received
*
* To be precisely record rates, following factors are considered:
* Exclude specific frames, ARP, DHCP, ssdp, etc.
@@ -2308,7 +2312,8 @@ static void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
uint32_t sgi, uint32_t mcs,
uint32_t nss, uint32_t bw, uint32_t pkt_type)
uint32_t nss, uint32_t bw, uint32_t pkt_type,
uint8_t link_id)
{
uint32_t rix;
uint16_t ratecode;
@@ -2321,7 +2326,7 @@ dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
return;
}
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id);
/* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
if (qdf_unlikely(pkt_type == DOT11_B))
@@ -2336,16 +2341,17 @@ dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
punc_mode,
&rix,
&ratecode);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id);
avg_rx_rate =
dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
ratekbps);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type);
dp_ath_rate_lpf(
txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate,
ratekbps);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id);
DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id);
}
#else
static inline void
@@ -2365,13 +2371,15 @@ dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
* @nbuf: received msdu buffer
* @rx_tlv_hdr: rx tlv header
* @txrx_peer: datapath txrx_peer handle
* @link_id: link id on which the packet is received
*
* Return: void
*/
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer)
struct dp_txrx_peer *txrx_peer,
uint8_t link_id)
{
bool is_ampdu;
uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
@@ -2382,8 +2390,9 @@ void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
* Try to use ring desc instead of tlv.
*/
is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id);
DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu),
link_id);
sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
@@ -2398,42 +2407,47 @@ void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
hal_2_dp_pkt_type_map[pkt_type]);
DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
link_id);
DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id);
/*
* only if nss > 0 and pkt_type is 11N/AC/AX,
* then increase index [nss - 1] in array counter.
*/
if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
hal_rx_tlv_mic_err_get(soc->hal_soc,
rx_tlv_hdr));
rx_tlv_hdr), link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
hal_rx_tlv_decrypt_err_get(soc->hal_soc,
rx_tlv_hdr));
rx_tlv_hdr), link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1,
link_id);
DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1,
link_id);
dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
DP_PEER_EXTD_STATS_INC(txrx_peer,
rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
1);
1, link_id);
dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
sgi, mcs, nss, bw, pkt_type);
sgi, mcs, nss, bw, pkt_type, link_id);
}
#else
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer)
struct dp_txrx_peer *txrx_peer,
uint8_t link_id)
{
}
#endif
@@ -2441,7 +2455,7 @@ void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
qdf_nbuf_t nbuf)
qdf_nbuf_t nbuf, uint8_t link_id)
{
uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);
@@ -2458,12 +2472,12 @@ dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
/* only count stats per lmac for MLO connection*/
DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf),
txrx_peer->mld_peer);
txrx_peer->mld_peer, link_id);
}
#else
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
qdf_nbuf_t nbuf)
qdf_nbuf_t nbuf, uint8_t link_id)
{
}
#endif
@@ -2472,7 +2486,8 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer,
uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats)
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id)
{
bool is_not_amsdu;
struct dp_vdev *vdev = txrx_peer->vdev;
@@ -2484,29 +2499,36 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
qdf_nbuf_is_rx_chfrag_end(nbuf);
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
msdu_len);
msdu_len, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
is_not_amsdu);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
is_not_amsdu, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1,
!is_not_amsdu, link_id);
DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
qdf_nbuf_is_rx_retry_flag(nbuf));
dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
qdf_nbuf_is_rx_retry_flag(nbuf), link_id);
dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id);
tid_stats->msdu_cnt++;
enh_flag = vdev->pdev->enhanced_stats_en;
if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
enh_flag = vdev->pdev->enhanced_stats_en;
DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id);
tid_stats->mcast_msdu_cnt++;
if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len,
enh_flag, link_id);
tid_stats->bcast_msdu_cnt++;
}
} else {
DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len,
enh_flag, link_id);
}
txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();
txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts =
qdf_system_ticks();
dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr,
txrx_peer, link_id);
}
#ifndef WDS_VENDOR_EXTENSION


@@ -358,12 +358,13 @@ bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf)
qdf_nbuf_t nbuf, uint8_t link_id)
{
if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
rx.intra_bss.mdns_no_fwd, 1);
rx.intra_bss.mdns_no_fwd,
1, link_id);
return false;
}
return true;
@@ -371,7 +372,7 @@ bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf)
qdf_nbuf_t nbuf, uint8_t link_id)
{
return true;
}
@@ -1434,13 +1435,15 @@ bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
* @rx_tlv_hdr: start address of rx tlvs
* @nbuf: nbuf that has to be intrabss forwarded
* @tid_stats: tid stats pointer
* @link_id: link Id on which packet is received
*
* Return: bool: true if it is forwarded else false
*/
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
struct dp_txrx_peer *ta_peer,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct cdp_tid_rx_stats *tid_stats);
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id);
/**
* dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
@@ -1450,6 +1453,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
* @rx_tlv_hdr: start address of rx tlvs
* @nbuf: nbuf that has to be intrabss forwarded
* @tid_stats: tid stats pointer
* @link_id: link Id on which packet is received
*
* Return: bool: true if it is forwarded else false
*/
@@ -1457,7 +1461,8 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
struct dp_txrx_peer *ta_peer,
uint8_t tx_vdev_id,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct cdp_tid_rx_stats *tid_stats);
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id);
/**
* dp_rx_defrag_concat() - Concatenate the fragments
@@ -2143,6 +2148,7 @@ void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
* @txrx_peer: pointer to the txrx peer object.
* @ring_id: reo dest ring number on which pkt is reaped.
* @tid_stats: per tid rx stats.
* @link_id: link Id on which packet is received
*
* update all the per msdu stats for that nbuf.
*
@@ -2152,7 +2158,8 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer,
uint8_t ring_id,
struct cdp_tid_rx_stats *tid_stats);
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id);
/**
* dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if


@@ -2286,7 +2286,8 @@ struct dp_arch_ops {
(*dp_rx_intrabss_mcast_handler)(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats);
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id);
void (*dp_rx_word_mask_subscribe)(
struct dp_soc *soc,
@@ -2329,7 +2330,8 @@ struct dp_arch_ops {
void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf);
bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
struct dp_txrx_peer *peer, qdf_nbuf_t nbuf);
struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
uint8_t link_id);
bool (*dp_tx_is_mcast_primary)(struct dp_soc *soc,
struct dp_vdev *vdev);
#endif


@@ -515,7 +515,8 @@ static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats)
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id)
{
return false;
}


@@ -173,7 +173,7 @@ dp_rx_intrabss_fwd_li(struct dp_soc *soc,
*/
if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_txrx_peer->bss_peer)
return dp_rx_intrabss_mcbc_fwd(soc, ta_txrx_peer, rx_tlv_hdr,
nbuf, tid_stats);
nbuf, tid_stats, 0);
if (dp_rx_intrabss_eapol_drop_check(soc, ta_txrx_peer, rx_tlv_hdr,
nbuf))
@@ -182,7 +182,8 @@ dp_rx_intrabss_fwd_li(struct dp_soc *soc,
if (dp_rx_intrabss_ucast_check_li(soc, nbuf, ta_txrx_peer,
&msdu_metadata, &tx_vdev_id))
return dp_rx_intrabss_ucast_fwd(soc, ta_txrx_peer, tx_vdev_id,
rx_tlv_hdr, nbuf, tid_stats);
rx_tlv_hdr, nbuf, tid_stats,
0);
return false;
}
@@ -724,7 +725,8 @@ done:
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.raw, 1,
msdu_len);
msdu_len,
0);
} else {
DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
@@ -756,7 +758,7 @@ done:
tid) == false) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.multipass_rx_pkt_drop,
1);
1, 0);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -766,7 +768,8 @@ done:
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_rx_err("%pK: Policy Check Drop pkt", soc);
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.policy_check_drop, 1);
rx.policy_check_drop,
1, 0);
tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
/* Drop & free packet */
dp_rx_nbuf_free(nbuf);
@@ -782,7 +785,8 @@ done:
false))) {
tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.nawds_mcast_drop, 1);
rx.nawds_mcast_drop,
1, 0);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -800,7 +804,7 @@ done:
if (!is_eapol) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.peer_unauth_rx_pkt_drop,
1);
1, 0);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -818,7 +822,7 @@ done:
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
reo_ring_num, tid_stats);
reo_ring_num, tid_stats, 0);
if (qdf_unlikely(vdev->mesh_vdev)) {
if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
@@ -863,7 +867,8 @@ done:
/* this is a looped back MCBC pkt,drop it */
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.mec_drop, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
QDF_NBUF_CB_RX_PKT_LEN(nbuf),
0);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
@@ -907,7 +912,8 @@ done:
if (qdf_unlikely(txrx_peer->in_twt))
DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
rx.to_stack_twt, 1,
QDF_NBUF_CB_RX_PKT_LEN(nbuf));
QDF_NBUF_CB_RX_PKT_LEN(nbuf),
0);
tid_stats->delivered_to_stack++;
nbuf = next;


@@ -153,7 +153,9 @@ dp_rx_peer_metadata_peer_id_get_li(struct dp_soc *soc, uint32_t peer_metadata)
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
qdf_nbuf_t nbuf_copy,
struct cdp_tid_rx_stats *tid_stats);
struct cdp_tid_rx_stats *tid_stats,
uint8_t link_id);
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
static inline
void dp_rx_prefetch_nbuf_data(qdf_nbuf_t nbuf, qdf_nbuf_t next)