qcacmn: Update stats API interface with xmit type

Update stats API interface with xmit type. Also, update the
unmap-peer stats update mechanism.

Change-Id: Ib39dee1c74e59d5c2ff523b3eca983e54b54676b
CRs-Fixed: 3561679
Aman Mehta, 2023-07-22 02:43:15 +05:30
committed by Rahul Choudhary
parent f49654adcd
commit 6e3897bd78
6 changed files with 279 additions and 145 deletions
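
At a glance: the vdev aggregation entry points gain an enum dp_pkt_xmit_type parameter and now read from the new internal struct dp_vdev_stats, while the unmap-peer arch hook drops its cdp_vdev_stats out-parameter. A condensed before/after of the touched signatures, assembled from the diffs below:

```c
/* before */
void dp_aggregate_all_vdev_stats(struct cdp_vdev_stats *tgt_vdev_stats,
				 struct cdp_vdev_stats *src_vdev_stats);
void (*dp_get_vdev_stats_for_unmap_peer)(struct dp_vdev *vdev,
					 struct dp_peer *peer,
					 struct cdp_vdev_stats **vdev_stats);

/* after */
void dp_aggregate_all_vdev_stats(struct cdp_vdev_stats *tgt_vdev_stats,
				 struct dp_vdev_stats *src_vdev_stats,
				 enum dp_pkt_xmit_type xmit_type);
void (*dp_get_vdev_stats_for_unmap_peer)(struct dp_vdev *vdev,
					 struct dp_peer *peer);
```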

View File

@@ -26,6 +26,9 @@
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"
+#define dp_aggregate_vdev_stats_for_unmapped_peers(_tgtobj, _srcobj) \
+DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj)
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
* dp_umac_reset_update_partner_map() - Update Umac reset partner map
@@ -390,7 +393,7 @@ static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
static inline
void dp_aggregate_vdev_basic_stats(
struct cdp_vdev_stats *tgt_vdev_stats,
-struct cdp_vdev_stats *src_vdev_stats)
+struct dp_vdev_stats *src_vdev_stats)
{
DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}
@@ -399,47 +402,37 @@ void dp_aggregate_vdev_basic_stats(
* dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
* @tgt_vdev_stats: target vdev buffer
* @src_vdev_stats: source vdev buffer
+* @xmit_type: xmit type of packet - MLD/Link
*
* return: void
*/
static inline
void dp_aggregate_vdev_ingress_stats(
struct cdp_vdev_stats *tgt_vdev_stats,
-struct cdp_vdev_stats *src_vdev_stats)
+struct dp_vdev_stats *src_vdev_stats,
+enum dp_pkt_xmit_type xmit_type)
{
/* Aggregate vdev ingress stats */
-DP_UPDATE_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats);
-}
-/**
-* dp_aggregate_vdev_stats_for_unmapped_peers() - aggregate unmap peer stats
-* @tgt_vdev_stats: target vdev buffer
-* @src_vdev_stats: source vdev buffer
-*
-* return: void
-*/
-static inline
-void dp_aggregate_vdev_stats_for_unmapped_peers(
-struct cdp_vdev_stats *tgt_vdev_stats,
-struct cdp_vdev_stats *src_vdev_stats)
-{
-/* Aggregate unmapped peers stats */
-DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(tgt_vdev_stats, src_vdev_stats);
+DP_UPDATE_LINK_VDEV_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats,
+xmit_type);
}
/**
* dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmap peer stats
* @tgt_vdev_stats: target vdev buffer
* @src_vdev_stats: source vdev buffer
+* @xmit_type: xmit type of packet - MLD/Link
*
* return: void
*/
static inline
void dp_aggregate_all_vdev_stats(
struct cdp_vdev_stats *tgt_vdev_stats,
-struct cdp_vdev_stats *src_vdev_stats)
+struct dp_vdev_stats *src_vdev_stats,
+enum dp_pkt_xmit_type xmit_type)
{
-dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats);
+dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats,
+xmit_type);
dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
src_vdev_stats);
}
@@ -449,13 +442,15 @@ void dp_aggregate_all_vdev_stats(
* @be_vdev: Dp Vdev handle
* @bridge_vdev: Dp vdev handle for bridge vdev
* @arg: buffer for target vdev stats
+* @xmit_type: xmit type of packet - MLD/Link
*
* return: void
*/
static
void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
struct dp_vdev *bridge_vdev,
-void *arg)
+void *arg,
+enum dp_pkt_xmit_type xmit_type)
{
struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
struct dp_vdev_be *bridge_be_vdev = NULL;
@@ -464,12 +459,50 @@ void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
if (!bridge_be_vdev)
return;
-dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats);
-dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_be_vdev->mlo_stats);
+dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats,
+xmit_type);
+dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
+(&bridge_be_vdev->mlo_stats));
dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
DP_MOD_ID_GENERIC_STATS);
}
+/**
+* dp_mlo_vdev_stats_aggr_bridge_vap_unified() - aggregate bridge vdev stats for
+* unified mode, where all MLO and legacy packets are submitted to the vdev
+* @be_vdev: Dp Vdev handle
+* @bridge_vdev: Dp vdev handle for bridge vdev
+* @arg: buffer for target vdev stats
+*
+* return: void
+*/
+static
+void dp_mlo_vdev_stats_aggr_bridge_vap_unified(struct dp_vdev_be *be_vdev,
+struct dp_vdev *bridge_vdev,
+void *arg)
+{
+dp_mlo_vdev_stats_aggr_bridge_vap(be_vdev, bridge_vdev, arg,
+DP_XMIT_TOTAL);
+}
+/**
+* dp_mlo_vdev_stats_aggr_bridge_vap_mld() - aggregate bridge vdev stats for MLD
+* mode, where all MLO packets are submitted to the MLD
+* @be_vdev: Dp Vdev handle
+* @bridge_vdev: Dp vdev handle for bridge vdev
+* @arg: buffer for target vdev stats
+*
+* return: void
+*/
+static
+void dp_mlo_vdev_stats_aggr_bridge_vap_mld(struct dp_vdev_be *be_vdev,
+struct dp_vdev *bridge_vdev,
+void *arg)
+{
+dp_mlo_vdev_stats_aggr_bridge_vap(be_vdev, bridge_vdev, arg,
+DP_XMIT_MLD);
+}
/**
* dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
* VDEV level based on peer type connected to vdev
@@ -500,21 +533,21 @@ void dp_aggregate_interface_stats_based_on_peer_type(
if (peer_type == DP_PEER_TYPE_LEGACY) {
dp_aggregate_all_vdev_stats(tgt_vdev_stats,
-&vdev->stats);
+&vdev->stats, DP_XMIT_LINK);
} else {
if (be_vdev->mcast_primary) {
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
-dp_mlo_vdev_stats_aggr_bridge_vap,
+dp_mlo_vdev_stats_aggr_bridge_vap_mld,
(void *)vdev_stats,
DP_MOD_ID_GENERIC_STATS,
DP_BRIDGE_VDEV_ITER,
DP_VDEV_ITERATE_SKIP_SELF);
}
dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
-&vdev->stats);
+&vdev->stats, DP_XMIT_MLD);
dp_aggregate_vdev_stats_for_unmapped_peers(
tgt_vdev_stats,
-&be_vdev->mlo_stats);
+(&be_vdev->mlo_stats));
}
/* Aggregate associated peer stats */
@@ -549,14 +582,16 @@ void dp_aggregate_interface_stats(struct dp_vdev *vdev,
if (be_vdev->mcast_primary) {
dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
-dp_mlo_vdev_stats_aggr_bridge_vap,
+dp_mlo_vdev_stats_aggr_bridge_vap_unified,
(void *)vdev_stats, DP_MOD_ID_GENERIC_STATS,
DP_BRIDGE_VDEV_ITER,
DP_VDEV_ITERATE_SKIP_SELF);
}
-dp_aggregate_all_vdev_stats(vdev_stats, &be_vdev->mlo_stats);
-dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats);
+dp_aggregate_vdev_stats_for_unmapped_peers(vdev_stats,
+(&be_vdev->mlo_stats));
+dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats,
+DP_XMIT_TOTAL);
dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
DP_MOD_ID_GENERIC_STATS);
@@ -644,7 +679,8 @@ dp_aggregate_sta_interface_stats(struct dp_soc *soc,
link_peer = link_peers_info.link_peers[i];
dp_update_vdev_stats(soc, link_peer, buf);
dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
-&link_peer->vdev->stats);
+&link_peer->vdev->stats,
+DP_XMIT_TOTAL);
dp_aggregate_vdev_basic_stats(
(struct cdp_vdev_stats *)buf,
&link_peer->vdev->stats);
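
The per-peer-type dispatch above boils down to: legacy peers aggregate only the link slot of tx_i[], MLD peers only the MLD slot, and the unified/bridge path both. A minimal stand-alone sketch of that selection; the structures and the folding behavior of DP_UPDATE_LINK_VDEV_INGRESS_STATS are illustrative assumptions for the sketch, not the real qcacmn definitions:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real layouts live in dp_types.h and the cdp
 * headers. DP_INGRESS_STATS_MAX_SIZE of 2 (one slot per xmit type) is an
 * assumption. */
enum dp_pkt_xmit_type { DP_XMIT_LINK, DP_XMIT_MLD, DP_XMIT_TOTAL };
#define DP_INGRESS_STATS_MAX_SIZE 2

struct ingress_stats { uint64_t rcvd; };
struct vdev_stats { struct ingress_stats tx_i[DP_INGRESS_STATS_MAX_SIZE]; };

/* Sketch of what DP_UPDATE_LINK_VDEV_INGRESS_STATS plausibly does: fold the
 * requested slot (both slots for DP_XMIT_TOTAL) into a flat target counter. */
static void aggregate_ingress(uint64_t *tgt_rcvd, const struct vdev_stats *src,
			      enum dp_pkt_xmit_type xmit_type)
{
	if (xmit_type == DP_XMIT_LINK || xmit_type == DP_XMIT_TOTAL)
		*tgt_rcvd += src->tx_i[DP_XMIT_LINK].rcvd;
	if (xmit_type == DP_XMIT_MLD || xmit_type == DP_XMIT_TOTAL)
		*tgt_rcvd += src->tx_i[DP_XMIT_MLD].rcvd;
}

int main(void)
{
	struct vdev_stats src = { .tx_i = { { 10 }, { 4 } } };
	uint64_t legacy = 0, mld = 0, unified = 0;

	aggregate_ingress(&legacy, &src, DP_XMIT_LINK);   /* legacy peers */
	aggregate_ingress(&mld, &src, DP_XMIT_MLD);       /* MLD peers */
	aggregate_ingress(&unified, &src, DP_XMIT_TOTAL); /* unified/bridge */
	printf("link=%" PRIu64 " mld=%" PRIu64 " total=%" PRIu64 "\n",
	       legacy, mld, unified);
	return 0;
}
```

Compiled as-is this prints link=10 mld=4 total=14, mirroring the DP_XMIT_LINK, DP_XMIT_MLD, and DP_XMIT_TOTAL call sites above.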

View File

@@ -4149,7 +4149,7 @@ void dp_ipa_aggregate_vdev_stats(struct dp_vdev *vdev,
soc = vdev->pdev->soc;
dp_update_vdev_ingress_stats(vdev);
-qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
+dp_copy_vdev_stats_to_tgt_buf(vdev_stats, &vdev->stats, DP_XMIT_LINK);
dp_vdev_iterate_peer(vdev, dp_ipa_update_vdev_stats, vdev_stats,
DP_MOD_ID_GENERIC_STATS);
dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);

View File

@@ -8191,11 +8191,19 @@ dp_print_pdev_tx_stats(struct dp_pdev *pdev)
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO)
void dp_print_vdev_mlo_mcast_tx_stats(struct dp_vdev *vdev)
{
+uint8_t idx;
+uint32_t send_pkt_count = 0;
+uint32_t fail_pkt_count = 0;
+for (idx = 0; idx < DP_INGRESS_STATS_MAX_SIZE; idx++) {
+send_pkt_count +=
+vdev->stats.tx_i[idx].mlo_mcast.send_pkt_count;
+fail_pkt_count +=
+vdev->stats.tx_i[idx].mlo_mcast.fail_pkt_count;
+}
DP_PRINT_STATS("MLO MCAST TX stats:");
DP_PRINT_STATS(" send packet count = %u",
vdev->stats.tx_i.mlo_mcast.send_pkt_count);
DP_PRINT_STATS(" failed packet count = %u",
vdev->stats.tx_i.mlo_mcast.fail_pkt_count);
DP_PRINT_STATS(" send packet count = %u", send_pkt_count);
DP_PRINT_STATS(" failed packet count = %u", fail_pkt_count);
}
#endif
@@ -8920,14 +8928,21 @@ static inline struct dp_peer *dp_get_stats_peer(struct dp_peer *peer)
}
#endif
+/**
+* dp_update_vdev_be_basic_stats() - Update vdev basic stats
+* @txrx_peer: DP txrx_peer handle
+* @tgtobj: Pointer to buffer for vdev stats
+*
+* Return: None
+*/
+static inline
+void dp_update_vdev_be_basic_stats(struct dp_txrx_peer *txrx_peer,
+struct dp_vdev_stats *tgtobj)
+{
+if (qdf_unlikely(!txrx_peer || !tgtobj))
+return;
+if (!dp_peer_get_hw_txrx_stats_en(txrx_peer)) {
+tgtobj->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
+tgtobj->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
+tgtobj->tx.tx_failed += txrx_peer->tx_failed;
+}
+tgtobj->rx.to_stack.num += txrx_peer->to_stack.num;
+tgtobj->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
+}
void dp_update_vdev_basic_stats(struct dp_txrx_peer *txrx_peer,
struct cdp_vdev_stats *tgtobj)
{
@@ -8973,47 +8988,37 @@ void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
}
link_stats:
-dp_monitor_peer_get_stats(soc, srcobj, vdev_stats, UPDATE_VDEV_STATS);
+dp_monitor_peer_get_stats(soc, srcobj, vdev_stats, UPDATE_VDEV_STATS_MLD);
}
+void dp_get_vdev_stats_for_unmap_peer_legacy(struct dp_vdev *vdev,
+struct dp_peer *peer)
+{
+struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
+struct dp_vdev_stats *vdev_stats = &vdev->stats;
+struct dp_soc *soc = vdev->pdev->soc;
+struct dp_peer_per_pkt_stats *per_pkt_stats;
+if (!txrx_peer)
+goto link_stats;
+dp_peer_aggregate_tid_stats(peer);
+per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
+dp_update_vdev_be_basic_stats(txrx_peer, vdev_stats);
+DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
+link_stats:
+dp_monitor_peer_get_stats(soc, peer, vdev_stats, UPDATE_VDEV_STATS);
+}
void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
struct dp_peer *peer)
{
struct dp_soc *soc = vdev->pdev->soc;
-struct dp_txrx_peer *txrx_peer;
-struct dp_peer_per_pkt_stats *per_pkt_stats;
-struct cdp_vdev_stats *vdev_stats = &vdev->stats;
-uint8_t link_id = 0;
-struct dp_pdev *pdev = vdev->pdev;
-if (soc->arch_ops.dp_get_vdev_stats_for_unmap_peer)
-soc->arch_ops.dp_get_vdev_stats_for_unmap_peer(vdev,
-peer,
-&vdev_stats);
-txrx_peer = dp_get_txrx_peer(peer);
-if (!txrx_peer)
-goto link_stats;
-dp_peer_aggregate_tid_stats(peer);
-if (!IS_MLO_DP_LINK_PEER(peer)) {
-per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
-dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
-DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
-}
-if (IS_MLO_DP_LINK_PEER(peer)) {
-link_id = dp_get_peer_hw_link_id(soc, pdev);
-if (link_id > 0) {
-per_pkt_stats =
-&txrx_peer->stats[link_id].per_pkt_stats;
-DP_UPDATE_PER_PKT_STATS(vdev_stats, per_pkt_stats);
-}
-}
-link_stats:
-dp_monitor_peer_get_stats(soc, peer, vdev_stats, UPDATE_VDEV_STATS);
+soc->arch_ops.dp_get_vdev_stats_for_unmap_peer(vdev, peer);
}
#else
void dp_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
@@ -9053,7 +9058,7 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
struct dp_txrx_peer *txrx_peer;
struct dp_peer_per_pkt_stats *per_pkt_stats;
struct dp_peer_extd_stats *extd_stats;
-struct cdp_vdev_stats *vdev_stats = &vdev->stats;
+struct dp_vdev_stats *vdev_stats = &vdev->stats;
uint8_t inx = 0;
uint8_t stats_arr_size = 0;
@@ -9062,7 +9067,7 @@ void dp_update_vdev_stats_on_peer_unmap(struct dp_vdev *vdev,
return;
stats_arr_size = txrx_peer->stats_arr_size;
-dp_update_vdev_basic_stats(txrx_peer, vdev_stats);
+dp_update_vdev_be_basic_stats(txrx_peer, vdev_stats);
for (inx = 0; inx < stats_arr_size; inx++) {
per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats;
@@ -9358,22 +9363,58 @@ void dp_update_pdev_stats(struct dp_pdev *tgtobj,
void dp_update_vdev_ingress_stats(struct dp_vdev *tgtobj)
{
-tgtobj->stats.tx_i.dropped.dropped_pkt.num =
-tgtobj->stats.tx_i.dropped.dma_error +
-tgtobj->stats.tx_i.dropped.ring_full +
-tgtobj->stats.tx_i.dropped.enqueue_fail +
-tgtobj->stats.tx_i.dropped.fail_per_pkt_vdev_id_check +
-tgtobj->stats.tx_i.dropped.desc_na.num +
-tgtobj->stats.tx_i.dropped.res_full +
-tgtobj->stats.tx_i.dropped.drop_ingress +
-tgtobj->stats.tx_i.dropped.headroom_insufficient +
-tgtobj->stats.tx_i.dropped.invalid_peer_id_in_exc_path +
-tgtobj->stats.tx_i.dropped.tx_mcast_drop +
-tgtobj->stats.tx_i.dropped.fw2wbm_tx_drop;
+uint8_t idx;
+for (idx = 0; idx < DP_INGRESS_STATS_MAX_SIZE; idx++) {
+tgtobj->stats.tx_i[idx].dropped.dropped_pkt.num +=
+tgtobj->stats.tx_i[idx].dropped.dma_error +
+tgtobj->stats.tx_i[idx].dropped.ring_full +
+tgtobj->stats.tx_i[idx].dropped.enqueue_fail +
+tgtobj->stats.tx_i[idx].dropped.fail_per_pkt_vdev_id_check +
+tgtobj->stats.tx_i[idx].dropped.desc_na.num +
+tgtobj->stats.tx_i[idx].dropped.res_full +
+tgtobj->stats.tx_i[idx].dropped.drop_ingress +
+tgtobj->stats.tx_i[idx].dropped.headroom_insufficient +
+tgtobj->stats.tx_i[idx].dropped.invalid_peer_id_in_exc_path +
+tgtobj->stats.tx_i[idx].dropped.tx_mcast_drop +
+tgtobj->stats.tx_i[idx].dropped.fw2wbm_tx_drop;
+}
}
+#ifdef HW_TX_DELAY_STATS_ENABLE
+static inline
+void dp_update_hw_tx_delay_stats(struct cdp_vdev_stats *vdev_stats,
+struct dp_vdev_stats *stats)
+{
+qdf_mem_copy(&vdev_stats->tid_tx_stats, &stats->tid_tx_stats,
+sizeof(stats->tid_tx_stats));
+}
+#else
+static inline
+void dp_update_hw_tx_delay_stats(struct cdp_vdev_stats *vdev_stats,
+struct dp_vdev_stats *stats)
+{
+}
+#endif
+void dp_copy_vdev_stats_to_tgt_buf(struct cdp_vdev_stats *vdev_stats,
+struct dp_vdev_stats *stats,
+enum dp_pkt_xmit_type xmit_type)
+{
+DP_UPDATE_LINK_VDEV_INGRESS_STATS(vdev_stats, stats, xmit_type);
+qdf_mem_copy(&vdev_stats->rx_i, &stats->rx_i, sizeof(stats->rx_i));
+qdf_mem_copy(&vdev_stats->tx, &stats->tx, sizeof(stats->tx));
+qdf_mem_copy(&vdev_stats->rx, &stats->rx, sizeof(stats->rx));
+qdf_mem_copy(&vdev_stats->tso_stats, &stats->tso_stats,
+sizeof(stats->tso_stats));
+dp_update_hw_tx_delay_stats(vdev_stats, stats);
+}
void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj,
-struct cdp_vdev_stats *srcobj)
+struct dp_vdev_stats *srcobj)
{
tgtobj->tx.last_tx_rate = srcobj->tx.last_tx_rate;
tgtobj->tx.last_tx_rate_mcs = srcobj->tx.last_tx_rate_mcs;
@@ -9385,50 +9426,69 @@ void dp_update_vdev_rate_stats(struct cdp_vdev_stats *tgtobj,
void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
struct dp_vdev *srcobj)
{
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.nawds_mcast);
+int idx;
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.rcvd);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.rcvd_in_fast_xmit_flow);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.rcvd_per_core[0]);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.rcvd_per_core[1]);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.rcvd_per_core[2]);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.rcvd_per_core[3]);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.processed);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.reinject_pkts);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.inspect_pkts);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.raw.raw_pkt);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.dma_map_error);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.raw.num_frags_overflow_err);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_host.num);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.sg.dropped_target);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sg.sg_pkt);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.mcast_en.mcast_pkt);
-DP_STATS_AGGR(tgtobj, srcobj,
-tx_i.mcast_en.dropped_map_error);
-DP_STATS_AGGR(tgtobj, srcobj,
-tx_i.mcast_en.dropped_self_mac);
-DP_STATS_AGGR(tgtobj, srcobj,
-tx_i.mcast_en.dropped_send_fail);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.mcast_en.ucast);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_rcvd);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.igmp_mcast_en.igmp_ucast_converted);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.fail_per_pkt_vdev_id_check);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.drop_ingress);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.invalid_peer_id_in_exc_path);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.tx_mcast_drop);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.fw2wbm_tx_drop);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
-DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
-DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);
+for (idx = 0; idx < DP_INGRESS_STATS_MAX_SIZE; idx++) {
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, nawds_mcast, idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, rcvd, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj,
+tx_i, rcvd_in_fast_xmit_flow, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, rcvd_per_core[0], idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, rcvd_per_core[1], idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, rcvd_per_core[2], idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, rcvd_per_core[3], idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, processed, idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, reinject_pkts, idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, inspect_pkts, idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, raw.raw_pkt, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, raw.dma_map_error, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj,
+tx_i, raw.num_frags_overflow_err, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, sg.dropped_host.num,
+idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, sg.dropped_target, idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, sg.sg_pkt, idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, mcast_en.mcast_pkt,
+idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj,
+tx_i, mcast_en.dropped_map_error, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj,
+tx_i, mcast_en.dropped_self_mac, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj,
+tx_i, mcast_en.dropped_send_fail, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, mcast_en.ucast, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i,
+igmp_mcast_en.igmp_rcvd, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i,
+igmp_mcast_en.igmp_ucast_converted, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.dma_error, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.ring_full, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.enqueue_fail,
+idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i,
+dropped.fail_per_pkt_vdev_id_check, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.desc_na.num,
+idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.res_full, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.drop_ingress,
+idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i,
+dropped.headroom_insufficient, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i,
+dropped.invalid_peer_id_in_exc_path, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i,
+dropped.tx_mcast_drop, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, dropped.fw2wbm_tx_drop,
+idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, cce_classified, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, cce_classified_raw,
+idx);
+DP_STATS_AGGR_PKT_IDX(tgtobj, srcobj, tx_i, sniffer_rcvd, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, mesh.exception_fw, idx);
+DP_STATS_AGGR_IDX(tgtobj, srcobj, tx_i, mesh.completion_fw,
+idx);
+}
DP_STATS_AGGR_PKT(tgtobj, srcobj, rx_i.reo_rcvd_pkt);
DP_STATS_AGGR_PKT(tgtobj, srcobj, rx_i.null_q_desc_pkt);
DP_STATS_AGGR_PKT(tgtobj, srcobj, rx_i.routed_eapol_pkt);
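
The rewritten pdev aggregation walks DP_INGRESS_STATS_MAX_SIZE ingress slots with _IDX variants of the aggregation macros. A hedged sketch of the shape such variants could take; the real DP_STATS_AGGR_IDX/DP_STATS_AGGR_PKT_IDX definitions live elsewhere in qcacmn and may differ:

```c
/* Hypothetical sketch: fold one indexed vdev slot into the flat pdev
 * counters. tgtobj keeps a flat tx_i block; srcobj keeps tx_i[] per
 * xmit type. */
#define DP_STATS_AGGR_IDX(_tgtobj, _srcobj, _arr, _field, _idx) \
	((_tgtobj)->stats._arr._field += (_srcobj)->stats._arr[_idx]._field)

#define DP_STATS_AGGR_PKT_IDX(_tgtobj, _srcobj, _arr, _field, _idx) \
	do { \
		(_tgtobj)->stats._arr._field.num += \
			(_srcobj)->stats._arr[_idx]._field.num; \
		(_tgtobj)->stats._arr._field.bytes += \
			(_srcobj)->stats._arr[_idx]._field.bytes; \
	} while (0)
```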

View File

@@ -208,6 +208,19 @@ typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc, void *arg,
#define DP_VDEV_ITERATE_SKIP_SELF 0
#endif
+/**
+* enum dp_pkt_xmit_type - The type of ingress stats being referred to
+*
+* @DP_XMIT_LINK: Packet ingressed on the link
+* @DP_XMIT_MLD: Packet ingressed on the MLD
+* @DP_XMIT_TOTAL: Packets ingressed on both the MLD and the link
+*/
+enum dp_pkt_xmit_type {
+DP_XMIT_LINK,
+DP_XMIT_MLD,
+DP_XMIT_TOTAL,
+};
enum rx_pktlog_mode {
DP_RX_PKTLOG_DISABLED = 0,
DP_RX_PKTLOG_FULL,
@@ -2527,8 +2540,7 @@ struct dp_arch_ops {
void (*dp_get_vdev_stats_for_unmap_peer)(
struct dp_vdev *vdev,
-struct dp_peer *peer,
-struct cdp_vdev_stats **vdev_stats);
+struct dp_peer *peer);
QDF_STATUS (*dp_get_interface_stats)(struct cdp_soc_t *soc_hdl,
uint8_t vdev_id,
void *buf,
@@ -3870,6 +3882,27 @@ struct dp_tx_latency {
};
#endif
+/**
+* struct dp_vdev_stats - vdev stats structure for dp vdev
+* @tx_i: ingress tx stats, contains legacy and MLO ingress tx stats
+* @rx_i: ingress rx stats
+* @tx: cdp tx stats
+* @rx: cdp rx stats
+* @tso_stats: tso stats
+* @tid_tx_stats: tid tx stats
+*/
+struct dp_vdev_stats {
+struct cdp_tx_ingress_stats tx_i[DP_INGRESS_STATS_MAX_SIZE];
+struct cdp_rx_ingress_stats rx_i;
+struct cdp_tx_stats tx;
+struct cdp_rx_stats rx;
+struct cdp_tso_stats tso_stats;
+#ifdef HW_TX_DELAY_STATS_ENABLE
+struct cdp_tid_tx_stats tid_tx_stats[CDP_MAX_TX_COMP_RINGS]
+[CDP_MAX_DATA_TIDS];
+#endif
+};
/* VDEV structure for data path state */
struct dp_vdev {
/* OS device abstraction */
@@ -4070,7 +4103,7 @@ struct dp_vdev {
uint64_t prev_rx_deliver_tstamp;
/* VDEV Stats */
-struct cdp_vdev_stats stats;
+struct dp_vdev_stats stats;
/* Is this a proxySTA VAP */
uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
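
dp_vdev now carries the internal dp_vdev_stats layout (tx_i[] per xmit type) while the exported cdp_vdev_stats stays flat, so callers pick the view when copying out, as dp_ipa_aggregate_vdev_stats() does above. A hedged usage sketch (locking and error handling omitted; the wrapper name is an assumption):

```c
/* Sketch: export link-only ingress stats for a legacy vdev into a caller
 * buffer. DP_XMIT_TOTAL would fold both the link and MLD slots instead. */
static void dp_export_vdev_stats_sketch(struct dp_vdev *vdev,
					struct cdp_vdev_stats *buf)
{
	dp_copy_vdev_stats_to_tgt_buf(buf, &vdev->stats, DP_XMIT_LINK);
}
```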

View File

@@ -623,9 +623,9 @@ static uint8_t dp_get_hw_link_id_li(struct dp_pdev *pdev)
static void dp_get_vdev_stats_for_unmap_peer_li(
struct dp_vdev *vdev,
-struct dp_peer *peer,
-struct cdp_vdev_stats **vdev_stats)
+struct dp_peer *peer)
{
+dp_get_vdev_stats_for_unmap_peer_legacy(vdev, peer);
}
static struct
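
With the out-parameter gone, each arch registers a plain two-argument hook; the Lithium variant above just forwards to the shared legacy helper. A hedged sketch of the wiring; the enclosing setup function is an assumption:

```c
/* Sketch: registering the reworked unmap-peer stats hook in the Lithium
 * arch-ops table; the surrounding init function name is hypothetical. */
static void dp_initialize_arch_ops_li_sketch(struct dp_arch_ops *arch_ops)
{
	arch_ops->dp_get_vdev_stats_for_unmap_peer =
					dp_get_vdev_stats_for_unmap_peer_li;
}
```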

View File

@@ -255,7 +255,9 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
}
case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
{
-DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
+DP_STATS_INC(vdev,
+tx_i[DP_XMIT_LINK].dropped.fail_per_pkt_vdev_id_check,
+1);
goto release_tx_desc;
}
default:
@@ -544,7 +546,8 @@ dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_ring_hdl);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
-DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
+DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail,
+1);
dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
return status;
}
@@ -557,7 +560,8 @@ dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
if (qdf_unlikely(!hal_tx_desc)) {
dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
-DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
+DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail,
+1);
dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
goto ring_access_fail;
}
@@ -567,7 +571,8 @@ dp_tx_hw_enqueue_li(struct dp_soc *soc, struct dp_vdev *vdev,
hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
msdu_info, ring_id);
-DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
+DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
+tx_desc->length);
DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
dp_tx_update_stats(soc, tx_desc, ring_id);
status = QDF_STATUS_SUCCESS;