qcacmn: Add support to get stats for MLD interface

Add support to get stats for MLD interface in single netdev model

Change-Id: I223a5c003191147970be57b92e99f1df3c66b339
CRs-Fixed: 3444443
This commit is contained in:
Harsh Kumar Bijlani
2023-03-13 19:07:04 +05:30
committed by Madan Koyyalamudi
parent 8ca91ab2bf
commit af094fe095
8 changed files with 266 additions and 34 deletions

View File

@@ -184,4 +184,27 @@ static inline void cdp_mlo_update_delta_tqm(ol_txrx_soc_handle soc,
soc->ops->mlo_ops->mlo_update_delta_tqm(soc, delta_tqm);
}
/**
 * cdp_mlo_get_mld_vdev_stats() - Get aggregated stats for the MLD vdev group
 * @soc: soc handle
 * @vdev_id: vdev_id of one of the vdevs of the MLD group
 * @buf: buffer to hold the aggregated vdev stats
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
cdp_mlo_get_mld_vdev_stats(ol_txrx_soc_handle soc,
			   uint8_t vdev_id, struct cdp_vdev_stats *buf)
{
	/* A NULL soc or a missing ops table is a programming error */
	if (!soc || !soc->ops) {
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* MLO ops are optional; fail gracefully when not registered */
	if (!soc->ops->mlo_ops || !soc->ops->mlo_ops->mlo_get_mld_vdev_stats)
		return QDF_STATUS_E_FAILURE;

	return soc->ops->mlo_ops->mlo_get_mld_vdev_stats(soc, vdev_id, buf);
}
#endif /*_CDP_TXRX_MLO_H_*/

View File

@@ -156,6 +156,7 @@ enum cdp_peer_txq_flush_policy {
* @mlo_update_mlo_ts_offset: update MLO timestamp offset for SOC
* @mlo_ctxt_attach: Attach DP MLO context
* @mlo_ctxt_detach: Detach DP MLO context
* @mlo_get_mld_vdev_stats: Get MLD vdev stats
*/
struct cdp_mlo_ops {
void (*mlo_soc_setup)(struct cdp_soc_t *cdp_soc,
@@ -178,6 +179,8 @@ struct cdp_mlo_ops {
uint64_t delta_tqm);
void (*mlo_update_mlo_ts_offset)(struct cdp_soc_t *soc_hdl,
uint64_t offset);
QDF_STATUS (*mlo_get_mld_vdev_stats)(struct cdp_soc_t *soc,
uint8_t vdev_id, void *buf);
};
#endif

View File

@@ -2383,7 +2383,7 @@ static void dp_txrx_set_mlo_mcast_primary_vdev_param_be(
if (be_vdev->mcast_primary) {
struct cdp_txrx_peer_params_update params = {0};
dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_mcast_reset_pri_mcast,
(void *)&be_vdev->mcast_primary,
DP_MOD_ID_TX_MCAST);

View File

@@ -516,13 +516,13 @@ void dp_mlo_partner_chips_map(struct dp_soc *soc,
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
uint16_t peer_id);
#ifdef WLAN_MCAST_MLO
#ifdef WLAN_MLO_MULTI_CHIP
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
struct dp_vdev *ptnr_vdev,
void *arg);
/**
* dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
* dp_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
* @be_soc: dp_soc_be pointer
* @be_vdev: dp_vdev_be pointer
* @func: function to be called for each peer
@@ -531,12 +531,13 @@ typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
*
* Return: None
*/
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
struct dp_vdev_be *be_vdev,
dp_ptnr_vdev_iter_func func,
void *arg,
dp_ptnr_vdev_iter_func func, void *arg,
enum dp_mod_id mod_id);
#endif
#ifdef WLAN_MCAST_MLO
/**
* dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
* @be_soc: dp_soc_be pointer

View File

@@ -659,7 +659,7 @@ dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_tx_mlo_mcast_multipass_lookup,
&mpass_buf, DP_MOD_ID_TX);
/*
@@ -694,7 +694,7 @@ dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
mpass_buf_copy.nbuf = nbuf_copy;
/* send frame on partner vdevs */
dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_tx_mlo_mcast_multipass_send,
&mpass_buf_copy, DP_MOD_ID_TX);
@@ -707,7 +707,7 @@ dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
be_vdev->seq_num++;
}
dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_tx_mlo_mcast_multipass_send,
&mpass_buf, DP_MOD_ID_TX);
dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);
@@ -796,7 +796,7 @@ void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
return;
/* send frame on partner vdevs */
dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_tx_mlo_mcast_pkt_send,
nbuf, DP_MOD_ID_REINJECT);
@@ -879,7 +879,7 @@ dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
*/
qdf_nbuf_ref(nbuf);
if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
dp_tx_mlo_mcast_enhance_be,
nbuf, DP_MOD_ID_TX);
qdf_nbuf_free(nbuf);

View File

@@ -543,6 +543,60 @@ static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
be_soc->mlo_tstamp_offset = offset;
}
#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_mlo_aggregate_mld_vdev_stats() - fold one partner vdev's stats into
 *				       the MLD-level accumulator
 * @be_vdev: dp_vdev_be of the vdev on which the iteration was started
 * @ptnr_vdev: partner vdev whose stats are folded in
 * @arg: opaque pointer to the struct cdp_vdev_stats accumulator
 *
 * Iterator callback; invoked once per partner vdev of the MLD group.
 *
 * Return: None
 */
static void dp_mlo_aggregate_mld_vdev_stats(struct dp_vdev_be *be_vdev,
					    struct dp_vdev *ptnr_vdev,
					    void *arg)
{
	struct cdp_vdev_stats *mld_stats = (struct cdp_vdev_stats *)arg;
	struct cdp_vdev_stats *link_stats = &ptnr_vdev->stats;

	/* Fold in this partner vdev's ingress counters */
	DP_UPDATE_INGRESS_STATS(mld_stats, link_stats);

	/* Fold in stats carried by peers that have already been unmapped */
	DP_UPDATE_PER_PKT_STATS(mld_stats, link_stats);
	DP_UPDATE_EXTD_STATS(mld_stats, link_stats);

	/* Walk the still-associated peers and add their stats */
	dp_vdev_iterate_peer(ptnr_vdev, dp_update_vdev_stats, mld_stats,
			     DP_MOD_ID_GENERIC_STATS);
}
/**
 * dp_mlo_get_mld_vdev_stats() - aggregate stats over all vdevs of the MLD
 *				 group that @vdev_id belongs to
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev_id of one of the vdevs of the MLD group
 * @buf: buffer to hold the aggregated stats (struct cdp_vdev_stats)
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct cdp_vdev_stats *vdev_stats;
	struct dp_vdev *vdev;
	struct dp_vdev_be *vdev_be;

	/* Reject a NULL buffer before taking any vdev reference */
	if (!buf)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be) {
		/* Drop the reference taken above before bailing out */
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	vdev_stats = (struct cdp_vdev_stats *)buf;

	/* Start with the stats of the vdev the caller named ... */
	dp_aggregate_vdev_stats(vdev, vdev_stats);

	/* ... then fold in every partner vdev of the MLD group */
	dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
			      dp_mlo_aggregate_mld_vdev_stats, vdev_stats,
			      DP_MOD_ID_GENERIC_STATS);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);

	return QDF_STATUS_SUCCESS;
}
#endif
static struct cdp_mlo_ops dp_mlo_ops = {
.mlo_soc_setup = dp_mlo_soc_setup,
.mlo_soc_teardown = dp_mlo_soc_teardown,
@@ -554,6 +608,9 @@ static struct cdp_mlo_ops dp_mlo_ops = {
.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};
void dp_soc_mlo_fill_params(struct dp_soc *soc,
@@ -821,8 +878,8 @@ dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
return NULL;
}
#ifdef WLAN_MCAST_MLO
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
#ifdef WLAN_MLO_MULTI_CHIP
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
struct dp_vdev_be *be_vdev,
dp_ptnr_vdev_iter_func func,
void *arg,
@@ -855,8 +912,10 @@ void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
}
}
qdf_export_symbol(dp_mcast_mlo_iter_ptnr_vdev);
qdf_export_symbol(dp_mlo_iter_ptnr_vdev);
#endif
#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
struct dp_vdev_be *be_vdev,
enum dp_mod_id mod_id)

View File

@@ -2169,6 +2169,153 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
DP_UPDATE_11BE_STATS(_tgtobj, _srcobj); \
} while (0)
#define DP_UPDATE_INGRESS_STATS(_tgtobj, _srcobj) \
do { \
uint8_t i = 0; \
_tgtobj->tx_i.rcvd.num += _srcobj->tx_i.rcvd.num; \
_tgtobj->tx_i.rcvd.bytes += _srcobj->tx_i.rcvd.bytes; \
_tgtobj->tx_i.rcvd_in_fast_xmit_flow += \
_srcobj->tx_i.rcvd_in_fast_xmit_flow; \
for (i = 0; i < CDP_MAX_TX_DATA_RINGS; i++) { \
_tgtobj->tx_i.rcvd_per_core[i] += \
_srcobj->tx_i.rcvd_per_core[i]; \
} \
_tgtobj->tx_i.processed.num += _srcobj->tx_i.processed.num; \
_tgtobj->tx_i.processed.bytes += \
_srcobj->tx_i.processed.bytes; \
_tgtobj->tx_i.reinject_pkts.num += \
_srcobj->tx_i.reinject_pkts.num; \
_tgtobj->tx_i.reinject_pkts.bytes += \
_srcobj->tx_i.reinject_pkts.bytes; \
_tgtobj->tx_i.inspect_pkts.num += \
_srcobj->tx_i.inspect_pkts.num; \
_tgtobj->tx_i.inspect_pkts.bytes += \
_srcobj->tx_i.inspect_pkts.bytes; \
_tgtobj->tx_i.nawds_mcast.num += \
_srcobj->tx_i.nawds_mcast.num; \
_tgtobj->tx_i.nawds_mcast.bytes += \
_srcobj->tx_i.nawds_mcast.bytes; \
_tgtobj->tx_i.bcast.num += _srcobj->tx_i.bcast.num; \
_tgtobj->tx_i.bcast.bytes += _srcobj->tx_i.bcast.bytes; \
_tgtobj->tx_i.raw.raw_pkt.num += \
_srcobj->tx_i.raw.raw_pkt.num; \
_tgtobj->tx_i.raw.raw_pkt.bytes += \
_srcobj->tx_i.raw.raw_pkt.bytes; \
_tgtobj->tx_i.raw.dma_map_error += \
_srcobj->tx_i.raw.dma_map_error; \
_tgtobj->tx_i.raw.invalid_raw_pkt_datatype += \
_srcobj->tx_i.raw.invalid_raw_pkt_datatype; \
_tgtobj->tx_i.raw.num_frags_overflow_err += \
_srcobj->tx_i.raw.num_frags_overflow_err; \
_tgtobj->tx_i.sg.sg_pkt.num += _srcobj->tx_i.sg.sg_pkt.num; \
_tgtobj->tx_i.sg.sg_pkt.bytes += \
_srcobj->tx_i.sg.sg_pkt.bytes; \
_tgtobj->tx_i.sg.non_sg_pkts.num += \
_srcobj->tx_i.sg.non_sg_pkts.num; \
_tgtobj->tx_i.sg.non_sg_pkts.bytes += \
_srcobj->tx_i.sg.non_sg_pkts.bytes; \
_tgtobj->tx_i.sg.dropped_host.num += \
_srcobj->tx_i.sg.dropped_host.num; \
_tgtobj->tx_i.sg.dropped_host.bytes += \
_srcobj->tx_i.sg.dropped_host.bytes; \
_tgtobj->tx_i.sg.dropped_target += \
_srcobj->tx_i.sg.dropped_target; \
_tgtobj->tx_i.sg.dma_map_error += \
_srcobj->tx_i.sg.dma_map_error; \
_tgtobj->tx_i.mcast_en.mcast_pkt.num += \
_srcobj->tx_i.mcast_en.mcast_pkt.num; \
_tgtobj->tx_i.mcast_en.mcast_pkt.bytes += \
_srcobj->tx_i.mcast_en.mcast_pkt.bytes; \
_tgtobj->tx_i.mcast_en.dropped_map_error += \
_srcobj->tx_i.mcast_en.dropped_map_error; \
_tgtobj->tx_i.mcast_en.dropped_self_mac += \
_srcobj->tx_i.mcast_en.dropped_self_mac; \
_tgtobj->tx_i.mcast_en.dropped_send_fail += \
_srcobj->tx_i.mcast_en.dropped_send_fail; \
_tgtobj->tx_i.mcast_en.ucast += _srcobj->tx_i.mcast_en.ucast; \
_tgtobj->tx_i.mcast_en.fail_seg_alloc += \
_srcobj->tx_i.mcast_en.fail_seg_alloc; \
_tgtobj->tx_i.mcast_en.clone_fail += \
_srcobj->tx_i.mcast_en.clone_fail; \
_tgtobj->tx_i.igmp_mcast_en.igmp_rcvd += \
_srcobj->tx_i.igmp_mcast_en.igmp_rcvd; \
_tgtobj->tx_i.igmp_mcast_en.igmp_ucast_converted += \
_srcobj->tx_i.igmp_mcast_en.igmp_ucast_converted; \
_tgtobj->tx_i.dropped.desc_na.num += \
_srcobj->tx_i.dropped.desc_na.num; \
_tgtobj->tx_i.dropped.desc_na.bytes += \
_srcobj->tx_i.dropped.desc_na.bytes; \
_tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.num += \
_srcobj->tx_i.dropped.desc_na_exc_alloc_fail.num; \
_tgtobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes += \
_srcobj->tx_i.dropped.desc_na_exc_alloc_fail.bytes; \
_tgtobj->tx_i.dropped.desc_na_exc_outstand.num += \
_srcobj->tx_i.dropped.desc_na_exc_outstand.num; \
_tgtobj->tx_i.dropped.desc_na_exc_outstand.bytes += \
_srcobj->tx_i.dropped.desc_na_exc_outstand.bytes; \
_tgtobj->tx_i.dropped.exc_desc_na.num += \
_srcobj->tx_i.dropped.exc_desc_na.num; \
_tgtobj->tx_i.dropped.exc_desc_na.bytes += \
_srcobj->tx_i.dropped.exc_desc_na.bytes; \
_tgtobj->tx_i.dropped.ring_full += \
_srcobj->tx_i.dropped.ring_full; \
_tgtobj->tx_i.dropped.enqueue_fail += \
_srcobj->tx_i.dropped.enqueue_fail; \
_tgtobj->tx_i.dropped.dma_error += \
_srcobj->tx_i.dropped.dma_error; \
_tgtobj->tx_i.dropped.res_full += \
_srcobj->tx_i.dropped.res_full; \
_tgtobj->tx_i.dropped.headroom_insufficient += \
_srcobj->tx_i.dropped.headroom_insufficient; \
_tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check += \
_srcobj->tx_i.dropped.fail_per_pkt_vdev_id_check; \
_tgtobj->tx_i.dropped.drop_ingress += \
_srcobj->tx_i.dropped.drop_ingress; \
_tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path += \
_srcobj->tx_i.dropped.invalid_peer_id_in_exc_path; \
_tgtobj->tx_i.dropped.tx_mcast_drop += \
_srcobj->tx_i.dropped.tx_mcast_drop; \
_tgtobj->tx_i.dropped.fw2wbm_tx_drop += \
_srcobj->tx_i.dropped.fw2wbm_tx_drop; \
_tgtobj->tx_i.dropped.dropped_pkt.num = \
_tgtobj->tx_i.dropped.dma_error + \
_tgtobj->tx_i.dropped.ring_full + \
_tgtobj->tx_i.dropped.enqueue_fail + \
_tgtobj->tx_i.dropped.fail_per_pkt_vdev_id_check + \
_tgtobj->tx_i.dropped.desc_na.num + \
_tgtobj->tx_i.dropped.res_full + \
_tgtobj->tx_i.dropped.drop_ingress + \
_tgtobj->tx_i.dropped.headroom_insufficient + \
_tgtobj->tx_i.dropped.invalid_peer_id_in_exc_path + \
_tgtobj->tx_i.dropped.tx_mcast_drop + \
_tgtobj->tx_i.dropped.fw2wbm_tx_drop; \
_tgtobj->tx_i.dropped.dropped_pkt.bytes += \
_srcobj->tx_i.dropped.dropped_pkt.bytes; \
_tgtobj->tx_i.mesh.exception_fw += \
_srcobj->tx_i.mesh.exception_fw; \
_tgtobj->tx_i.mesh.completion_fw += \
_srcobj->tx_i.mesh.completion_fw; \
_tgtobj->tx_i.cce_classified += \
_srcobj->tx_i.cce_classified; \
_tgtobj->tx_i.cce_classified_raw += \
_srcobj->tx_i.cce_classified_raw; \
_tgtobj->tx_i.sniffer_rcvd.num += \
_srcobj->tx_i.sniffer_rcvd.num; \
_tgtobj->tx_i.sniffer_rcvd.bytes += \
_srcobj->tx_i.sniffer_rcvd.bytes; \
_tgtobj->rx_i.reo_rcvd_pkt.num += \
_srcobj->rx_i.reo_rcvd_pkt.num; \
_tgtobj->rx_i.reo_rcvd_pkt.bytes += \
_srcobj->rx_i.reo_rcvd_pkt.bytes; \
_tgtobj->rx_i.null_q_desc_pkt.num += \
_srcobj->rx_i.null_q_desc_pkt.num; \
_tgtobj->rx_i.null_q_desc_pkt.bytes += \
_srcobj->rx_i.null_q_desc_pkt.bytes; \
_tgtobj->rx_i.routed_eapol_pkt.num += \
_srcobj->rx_i.routed_eapol_pkt.num; \
_tgtobj->rx_i.routed_eapol_pkt.bytes += \
_srcobj->rx_i.routed_eapol_pkt.bytes; \
} while (0)
/**
* dp_peer_find_attach() - Allocates memory for peer objects
* @soc: SoC handle

View File

@@ -5698,7 +5698,6 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
if (!vdev || !vdev->pdev)
return;
dp_update_vdev_ingress_stats(vdev);
qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));