qcacmn: Add cdp api to get accumulated pdev tid stats

Add cdp api to get accumulated pdev tid stats along with per tid
total tid rx and tx stats.

Change-Id: I7862c4265f199d0588d8865c5ed97e06010dd79f
CRs-Fixed: 3207499
Cette révision appartient à :
Subrat Mishra
2022-05-26 18:24:18 +05:30
révisé par Madan Koyyalamudi
Parent 1049507d23
révision 120fb06e12
6 fichiers modifiés avec 129 ajouts et 1 suppressions

Voir le fichier

@@ -955,4 +955,28 @@ cdp_vdev_is_tx_delay_stats_enabled(ol_txrx_soc_handle soc, uint8_t vdev_id)
return 0; return 0;
} }
#endif #endif
/**
 * cdp_get_pdev_tid_stats() - Fetch accumulated pdev tid stats
 * @soc: soc handle
 * @pdev_id: Pdev id
 * @tid_stats: Pointer to cdp_tid_stats_intf filled in by the target layer
 *
 * Thin dispatch wrapper: validates the soc/ops chain and forwards the
 * request to the registered txrx_get_pdev_tid_stats callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when the
 *	   instance or the callback is unavailable (or the callback fails)
 */
static inline QDF_STATUS
cdp_get_pdev_tid_stats(ol_txrx_soc_handle soc, uint8_t pdev_id,
		       struct cdp_tid_stats_intf *tid_stats)
{
	struct cdp_host_stats_ops *stats_ops;

	if (!soc || !soc->ops || !soc->ops->host_stats_ops) {
		dp_cdp_debug("Invalid Instance:");
		return QDF_STATUS_E_FAILURE;
	}

	stats_ops = soc->ops->host_stats_ops;
	if (!stats_ops->txrx_get_pdev_tid_stats)
		return QDF_STATUS_E_FAILURE;

	return stats_ops->txrx_get_pdev_tid_stats(soc, pdev_id, tid_stats);
}
#endif /* _CDP_TXRX_HOST_STATS_H_ */ #endif /* _CDP_TXRX_HOST_STATS_H_ */

Voir le fichier

@@ -1153,6 +1153,9 @@ struct cdp_host_stats_ops {
uint8_t (*is_tx_delay_stats_enabled)(struct cdp_soc_t *soc_hdl, uint8_t (*is_tx_delay_stats_enabled)(struct cdp_soc_t *soc_hdl,
uint8_t vdev_id); uint8_t vdev_id);
#endif #endif
QDF_STATUS
(*txrx_get_pdev_tid_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
struct cdp_tid_stats_intf *tid_stats);
}; };
struct cdp_wds_ops { struct cdp_wds_ops {

Voir le fichier

@@ -1042,6 +1042,20 @@ struct cdp_tid_stats {
[CDP_MAX_DATA_TIDS]; [CDP_MAX_DATA_TIDS];
}; };
/**
 * struct cdp_tid_stats_intf - accumulated pdev-level tid stats exported
 *	over the cdp interface (totals across all rings, per data tid)
 * @ingress_stack: Total packets received from linux stack
 * @osif_drop: drops in osif layer
 * @tx_total: total of per ring transmit counters per tid
 * @rx_total: total of per ring receive counters per tid
 */
struct cdp_tid_stats_intf {
	uint64_t ingress_stack;
	uint64_t osif_drop;
	struct cdp_tid_tx_stats tx_total[CDP_MAX_DATA_TIDS];
	struct cdp_tid_rx_stats rx_total[CDP_MAX_DATA_TIDS];
};
/* /*
* struct cdp_delay_tx_stats: Tx delay stats * struct cdp_delay_tx_stats: Tx delay stats
* @tx_swq_delay: software enqueue delay * @tx_swq_delay: software enqueue delay

Voir le fichier

@@ -2784,7 +2784,18 @@ void dp_pdev_print_tid_stats(struct dp_pdev *pdev);
* Return:void * Return:void
*/ */
void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev); void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev);
#endif /* CONFIG_WIN */ #endif /* QCA_ENH_V3_STATS_SUPPORT */
/**
* dp_pdev_get_tid_stats(): Get accumulated pdev level tid_stats
* @soc_hdl: soc handle
* @pdev_id: id of dp_pdev handle
* @tid_stats: Pointer for cdp_tid_stats_intf
*
* Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_INVAL
*/
QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
struct cdp_tid_stats_intf *tid_stats);
void dp_soc_set_txrx_ring_map(struct dp_soc *soc); void dp_soc_set_txrx_ring_map(struct dp_soc *soc);

Voir le fichier

@@ -12727,6 +12727,7 @@ static struct cdp_host_stats_ops dp_ops_host_stats = {
dp_enable_disable_vdev_tx_delay_stats, dp_enable_disable_vdev_tx_delay_stats,
.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled, .is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif #endif
.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
/* TODO */ /* TODO */
}; };

Voir le fichier

@@ -4708,6 +4708,7 @@ dp_accumulate_tid_stats(struct dp_pdev *pdev, uint8_t tid,
for (ring_id = 0; ring_id < CDP_MAX_TX_COMP_RINGS; ring_id++) { for (ring_id = 0; ring_id < CDP_MAX_TX_COMP_RINGS; ring_id++) {
per_ring_tx = &tid_stats->tid_tx_stats[ring_id][tid]; per_ring_tx = &tid_stats->tid_tx_stats[ring_id][tid];
total_tx->success_cnt += per_ring_tx->success_cnt; total_tx->success_cnt += per_ring_tx->success_cnt;
total_tx->comp_fail_cnt += per_ring_tx->comp_fail_cnt;
for (tqm_status_idx = 0; tqm_status_idx < CDP_MAX_TX_TQM_STATUS; tqm_status_idx++) { for (tqm_status_idx = 0; tqm_status_idx < CDP_MAX_TX_TQM_STATUS; tqm_status_idx++) {
total_tx->tqm_status_cnt[tqm_status_idx] += total_tx->tqm_status_cnt[tqm_status_idx] +=
per_ring_tx->tqm_status_cnt[tqm_status_idx]; per_ring_tx->tqm_status_cnt[tqm_status_idx];
@@ -4977,6 +4978,80 @@ void dp_pdev_print_rx_error_stats(struct dp_pdev *pdev)
} }
} }
} }
/*
 * dp_pdev_get_tid_stats() - Accumulate per-ring tid counters, delay stats
 * and rx-error stats for the given pdev and copy the selected fields into
 * the caller-supplied cdp_tid_stats_intf container.
 */
QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				 struct cdp_tid_stats_intf *tid_stats)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	/* Scratch accumulators refilled by dp_accumulate_tid_stats() on each
	 * call. NOTE(review): they are not zeroed here — presumably
	 * dp_accumulate_tid_stats() initializes every field read below;
	 * confirm against its implementation.
	 */
	struct cdp_tid_rx_stats rx;
	struct cdp_tid_tx_stats tx;
	uint8_t tid;
	uint32_t size;

	if (!pdev)
		return QDF_STATUS_E_INVAL;

	/* All delay members copied below are cdp_delay_stats blobs */
	size = sizeof(struct cdp_delay_stats);

	for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
		/* First pass: plain per-tid counters summed across rings */
		dp_accumulate_tid_stats(pdev, tid, &tx, &rx, TID_COUNTER_STATS);
		/* Copy specific accumulated Tx tid stats */
		tid_stats->tx_total[tid].success_cnt = tx.success_cnt;
		tid_stats->tx_total[tid].comp_fail_cnt = tx.comp_fail_cnt;
		qdf_mem_copy(&tid_stats->tx_total[tid].tqm_status_cnt[0],
			     &tx.tqm_status_cnt[0],
			     CDP_MAX_TX_TQM_STATUS * sizeof(uint64_t));
		qdf_mem_copy(&tid_stats->tx_total[tid].htt_status_cnt[0],
			     &tx.htt_status_cnt[0],
			     CDP_MAX_TX_HTT_STATUS * sizeof(uint64_t));
		qdf_mem_copy(&tid_stats->tx_total[tid].swdrop_cnt[0],
			     &tx.swdrop_cnt[0], TX_MAX_DROP * sizeof(uint64_t));
		/* Copy specific accumulated Rx tid stats */
		tid_stats->rx_total[tid].delivered_to_stack =
							rx.delivered_to_stack;
		tid_stats->rx_total[tid].intrabss_cnt = rx.intrabss_cnt;
		tid_stats->rx_total[tid].msdu_cnt = rx.msdu_cnt;
		tid_stats->rx_total[tid].mcast_msdu_cnt = rx.mcast_msdu_cnt;
		tid_stats->rx_total[tid].bcast_msdu_cnt = rx.bcast_msdu_cnt;
		qdf_mem_copy(&tid_stats->rx_total[tid].fail_cnt[0],
			     &rx.fail_cnt[0], RX_MAX_DROP * sizeof(uint64_t));
		/* Second pass: delay histograms for the same tid */
		dp_accumulate_tid_stats(pdev, tid, &tx, &rx, TID_DELAY_STATS);
		/* Copy specific accumulated Tx delay stats */
		qdf_mem_copy(&tid_stats->tx_total[tid].swq_delay,
			     &tx.swq_delay, size);
		qdf_mem_copy(&tid_stats->tx_total[tid].hwtx_delay,
			     &tx.hwtx_delay, size);
		qdf_mem_copy(&tid_stats->tx_total[tid].intfrm_delay,
			     &tx.intfrm_delay, size);
		/* Copy specific accumulated Rx delay stats */
		qdf_mem_copy(&tid_stats->rx_total[tid].intfrm_delay,
			     &rx.intfrm_delay, size);
		qdf_mem_copy(&tid_stats->rx_total[tid].to_stack_delay,
			     &rx.to_stack_delay, size);
	}

	/* Rx error stats are only tracked for the VOW tids (a subset of
	 * CDP_MAX_DATA_TIDS, so rx_total[] indexing stays in bounds).
	 */
	for (tid = 0; tid < CDP_MAX_VOW_TID; tid++) {
		dp_accumulate_tid_stats(pdev, tid, &tx, &rx,
					TID_RX_ERROR_STATS);
		/* Copy specific accumulated VOW Rx stats */
		qdf_mem_copy(&tid_stats->rx_total[tid].reo_err,
			     &rx.reo_err, sizeof(struct cdp_reo_error_stats));
		qdf_mem_copy(&tid_stats->rx_total[tid].rxdma_err, &rx.rxdma_err,
			     sizeof(struct cdp_rxdma_error_stats));
	}
	/* Pdev-wide ingress counters are read directly, not accumulated */
	tid_stats->ingress_stack = pdev->stats.tid_stats.ingress_stack;
	tid_stats->osif_drop = pdev->stats.tid_stats.osif_drop;

	return QDF_STATUS_SUCCESS;
}
#else
/*
 * Stub used when per-tid stats accumulation is compiled out (this sits in
 * the #else branch — presumably of QCA_ENH_V3_STATS_SUPPORT, per the
 * matching header comment; confirm against the opening #ifdef).
 * Returns failure so callers know tid_stats was not populated.
 */
QDF_STATUS dp_pdev_get_tid_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				 struct cdp_tid_stats_intf *tid_stats)
{
	return QDF_STATUS_E_INVAL;
}
#endif #endif
#ifdef HW_TX_DELAY_STATS_ENABLE #ifdef HW_TX_DELAY_STATS_ENABLE