qcacmn: Add support for Rx MLO link stats for ML peer

Add support to update MLO link stats in the Rx per-packet path
for ML peers.

Change-Id: Ica25993126b4ce49f9e36b7b290d9887e4885155
CRs-Fixed: 3397721
Author:    Kenvish Butani
Date:      2023-01-12 15:03:39 +05:30
Committer: Madan Koyyalamudi
Parent:    3243b9e4f4
Commit:    7fdd918e7f
8 files changed, 153 insertions(+), 90 deletions(-)
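The hunks below thread a new link_id argument through the Rx per-packet stats helpers so that every increment lands in the stats bucket of the MLO link the frame arrived on. The standalone C sketch that follows illustrates only that indexing pattern; the sketch_* names and the SKETCH_MAX_MLO_LINKS bound are hypothetical and are not the qcacmn definitions, while the real code goes through the DP_PEER_*_STATS_* macros shown in the diff.

#include <stdint.h>

#define SKETCH_MAX_MLO_LINKS 3  /* assumed bound; the real limit comes from the MLO config */

struct sketch_rx_per_pkt_stats {
        uint64_t rcvd_pkts;     /* MSDUs counted on this link */
        uint64_t rcvd_bytes;    /* bytes counted on this link */
};

struct sketch_txrx_peer {
        /* one stats bucket per MLO link; a non-ML peer would always use index 0 */
        struct sketch_rx_per_pkt_stats rx[SKETCH_MAX_MLO_LINKS];
};

/* Roughly what DP_PEER_PER_PKT_STATS_INC_PKT(peer, <field>, 1, len, link_id)
 * amounts to after this change: select the stats bucket by link_id first,
 * then bump the packet and byte counters for that link.
 */
static inline void
sketch_rx_stats_inc_pkt(struct sketch_txrx_peer *peer, uint8_t link_id,
                        uint32_t pkt_len)
{
        peer->rx[link_id].rcvd_pkts += 1;
        peer->rx[link_id].rcvd_bytes += pkt_len;
}

The Rx fast path already knows which link the MPDU arrived on, so per-link accounting stays a plain array index (visible in the diff as txrx_peer->stats[link_id]) rather than a per-packet lookup.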

@@ -1106,7 +1106,8 @@ void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                              uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
-                             struct cdp_tid_rx_stats *tid_stats)
+                             struct cdp_tid_rx_stats *tid_stats,
+                             uint8_t link_id)
 {
         uint16_t len;
         qdf_nbuf_t nbuf_copy;
@@ -1115,7 +1116,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                                               nbuf))
                 return true;
-        if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
+        if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
                 return false;
         /* If the source peer in the isolation list
@@ -1135,7 +1136,8 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                 if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
                                                                 nbuf_copy,
-                                                                tid_stats))
+                                                                tid_stats,
+                                                                link_id))
                         return false;
                 /* Don't send packets if tx is paused */
@@ -1143,11 +1145,11 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                     !dp_tx_send((struct cdp_soc_t *)soc,
                                 ta_peer->vdev->vdev_id, nbuf_copy)) {
                         DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
-                                                      len);
+                                                      len, link_id);
                         tid_stats->intrabss_cnt++;
                 } else {
                         DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
-                                                      len);
+                                                      len, link_id);
                         tid_stats->fail_cnt[INTRABSS_DROP]++;
                         dp_rx_nbuf_free(nbuf_copy);
                 }
@@ -1157,7 +1159,8 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                               uint8_t tx_vdev_id,
                               uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
-                              struct cdp_tid_rx_stats *tid_stats)
+                              struct cdp_tid_rx_stats *tid_stats,
+                              uint8_t link_id)
 {
         uint16_t len;
@@ -1174,7 +1177,7 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                 if (!nbuf) {
                         DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
                                                       rx.intra_bss.fail,
-                                                      1, len);
+                                                      1, len, link_id);
                         /* return true even though the pkt is
                          * not forwarded. Basically skb_unshare
                          * failed and we want to continue with
@@ -1192,10 +1195,10 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
         if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
                                              tx_vdev_id, nbuf)) {
                 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
-                                              len);
+                                              len, link_id);
         } else {
                 DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
-                                              len);
+                                              len, link_id);
                 tid_stats->fail_cnt[INTRABSS_DROP]++;
                 return false;
         }
@@ -2297,6 +2300,7 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
  * @nss: Number of Spatial Streams
  * @bw: BandWidth
  * @pkt_type: Corresponds to preamble
+ * @link_id: Link Id on which packet is received
  *
  * To be precisely record rates, following factors are considered:
  * Exclude specific frames, ARP, DHCP, ssdp, etc.
@@ -2308,7 +2312,8 @@ static void
 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                          uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
                          uint32_t sgi, uint32_t mcs,
-                         uint32_t nss, uint32_t bw, uint32_t pkt_type)
+                         uint32_t nss, uint32_t bw, uint32_t pkt_type,
+                         uint8_t link_id)
 {
         uint32_t rix;
         uint16_t ratecode;
@@ -2321,7 +2326,7 @@ dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                 return;
         }
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id);
         /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
         if (qdf_unlikely(pkt_type == DOT11_B))
@@ -2336,16 +2341,17 @@ dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                                   punc_mode,
                                   &rix,
                                   &ratecode);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id);
         avg_rx_rate =
-                dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
-                                ratekbps);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi);
-        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type);
+                dp_ath_rate_lpf(
+                        txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate,
+                        ratekbps);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id);
+        DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id);
 }
 #else
 static inline void
@@ -2365,13 +2371,15 @@ dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  * @nbuf: received msdu buffer
  * @rx_tlv_hdr: rx tlv header
  * @txrx_peer: datapath txrx_peer handle
+ * @link_id: link id on which the packet is received
  *
  * Return: void
  */
 static inline
 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                                   uint8_t *rx_tlv_hdr,
-                                  struct dp_txrx_peer *txrx_peer)
+                                  struct dp_txrx_peer *txrx_peer,
+                                  uint8_t link_id)
 {
         bool is_ampdu;
         uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
@@ -2382,8 +2390,9 @@ void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
          * Try to use ring desc instead of tlv.
          */
         is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
-        DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
-        DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
+        DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id);
+        DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu),
+                                link_id);
         sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
         mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
@@ -2398,42 +2407,47 @@ void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                                    hal_2_dp_pkt_type_map[pkt_type]);
         DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
-                        ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
+                        ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
+                        link_id);
         DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
-                        ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
-        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
+                        ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
+                        link_id);
+        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id);
         /*
          * only if nss > 0 and pkt_type is 11N/AC/AX,
          * then increase index [nss - 1] in array counter.
          */
         if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
-                DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);
+                DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id);
-        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
+        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id);
         DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
                                    hal_rx_tlv_mic_err_get(soc->hal_soc,
-                                                          rx_tlv_hdr));
+                                                          rx_tlv_hdr), link_id);
         DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
                                    hal_rx_tlv_decrypt_err_get(soc->hal_soc,
-                                                              rx_tlv_hdr));
+                                                              rx_tlv_hdr), link_id);
-        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
-        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1);
+        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1,
+                               link_id);
+        DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1,
+                               link_id);
         dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
         if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
                 DP_PEER_EXTD_STATS_INC(txrx_peer,
                                        rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
-                                       1);
+                                       1, link_id);
         dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
-                                 sgi, mcs, nss, bw, pkt_type);
+                                 sgi, mcs, nss, bw, pkt_type, link_id);
 }
 #else
 static inline
 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                                   uint8_t *rx_tlv_hdr,
-                                  struct dp_txrx_peer *txrx_peer)
+                                  struct dp_txrx_peer *txrx_peer,
+                                  uint8_t link_id)
 {
 }
 #endif
@@ -2441,7 +2455,7 @@ void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
 static inline void
 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
-                               qdf_nbuf_t nbuf)
+                               qdf_nbuf_t nbuf, uint8_t link_id)
 {
         uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);
@@ -2458,12 +2472,12 @@ dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
         /* only count stats per lmac for MLO connection*/
         DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
                                        QDF_NBUF_CB_RX_PKT_LEN(nbuf),
-                                       txrx_peer->mld_peer);
+                                       txrx_peer->mld_peer, link_id);
 }
 #else
 static inline void
 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
-                               qdf_nbuf_t nbuf)
+                               qdf_nbuf_t nbuf, uint8_t link_id)
 {
 }
 #endif
@@ -2472,7 +2486,8 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
                              uint8_t *rx_tlv_hdr,
                              struct dp_txrx_peer *txrx_peer,
                              uint8_t ring_id,
-                             struct cdp_tid_rx_stats *tid_stats)
+                             struct cdp_tid_rx_stats *tid_stats,
+                             uint8_t link_id)
 {
         bool is_not_amsdu;
         struct dp_vdev *vdev = txrx_peer->vdev;
@@ -2484,29 +2499,36 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
         is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
                        qdf_nbuf_is_rx_chfrag_end(nbuf);
         DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
-                                      msdu_len);
+                                      msdu_len, link_id);
         DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
-                                   is_not_amsdu);
-        DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
+                                   is_not_amsdu, link_id);
+        DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1,
+                                   !is_not_amsdu, link_id);
         DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
-                                   qdf_nbuf_is_rx_retry_flag(nbuf));
-        dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
+                                   qdf_nbuf_is_rx_retry_flag(nbuf), link_id);
+        dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id);
         tid_stats->msdu_cnt++;
+        enh_flag = vdev->pdev->enhanced_stats_en;
         if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
                          (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
                 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
-                enh_flag = vdev->pdev->enhanced_stats_en;
-                DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
+                DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id);
                 tid_stats->mcast_msdu_cnt++;
                 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
-                        DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
+                        DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len,
+                                            enh_flag, link_id);
                         tid_stats->bcast_msdu_cnt++;
                 }
+        } else {
+                DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len,
+                                    enh_flag, link_id);
         }
-        txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();
+        txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts =
+                                                        qdf_system_ticks();
-        dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
+        dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr,
+                                     txrx_peer, link_id);
 }
 #ifndef WDS_VENDOR_EXTENSION