qcacmn: Add fields for Tx TQM and FW exception drops

VoW stats should contain fields to count TQM and FW exception
drops

Change-Id: I71a81b8e9cc9428b5c727d77c0eeec5bb23a2b42
This commit is contained in:
Mainak Sen
2019-08-05 14:20:33 +05:30
committed by nshrivas
parent 365029bb1f
commit f3053eb1d1
4 changed files with 46 additions and 15 deletions

View File

@@ -70,6 +70,8 @@
 #define CDP_MAX_RX_RINGS 4 /* max rx rings */
 #define CDP_MAX_TX_COMP_RINGS 3 /* max tx completion rings */
+#define CDP_MAX_TX_TQM_STATUS 9 /* max tx tqm completion status */
+#define CDP_MAX_TX_HTT_STATUS 7 /* max tx htt completion status */
 
 /* TID level VoW stats macros
  * to add and get stats
@@ -347,6 +349,8 @@ struct cdp_delay_stats {
  * @success_cnt: total successful transmit count
  * @comp_fail_cnt: firmware drop found in tx completion path
  * @swdrop_cnt: software drop in tx path
+ * @tqm_status_cnt: TQM completion status count
+ * @htt_status_cnt: HTT completion status count
  */
 struct cdp_tid_tx_stats {
 	struct cdp_delay_stats swq_delay;
@@ -355,6 +359,8 @@ struct cdp_tid_tx_stats {
 	uint64_t success_cnt;
 	uint64_t comp_fail_cnt;
 	uint64_t swdrop_cnt[TX_MAX_DROP];
+	uint64_t tqm_status_cnt[CDP_MAX_TX_TQM_STATUS];
+	uint64_t htt_status_cnt[CDP_MAX_TX_HTT_STATUS];
 };
 
 /*

View File

@@ -4097,7 +4097,7 @@ dp_accumulate_tid_stats(struct dp_pdev *pdev, uint8_t tid,
 			struct cdp_tid_tx_stats *total_tx,
 			struct cdp_tid_rx_stats *total_rx, uint8_t type)
 {
-	uint8_t ring_id = 0, drop = 0;
+	uint8_t ring_id = 0, drop = 0, tqm_status_idx = 0, htt_status_idx = 0;
 	struct cdp_tid_stats *tid_stats = &pdev->stats.tid_stats;
 	struct cdp_tid_tx_stats *per_ring_tx = NULL;
 	struct cdp_tid_rx_stats *per_ring_rx = NULL;
@@ -4119,7 +4119,16 @@ dp_accumulate_tid_stats(struct dp_pdev *pdev, uint8_t tid,
 	for (ring_id = 0; ring_id < CDP_MAX_TX_COMP_RINGS; ring_id++) {
 		per_ring_tx = &tid_stats->tid_tx_stats[ring_id][tid];
 		total_tx->success_cnt += per_ring_tx->success_cnt;
-		total_tx->comp_fail_cnt += per_ring_tx->comp_fail_cnt;
+		for (tqm_status_idx = 0; tqm_status_idx < CDP_MAX_TX_TQM_STATUS; tqm_status_idx++) {
+			total_tx->tqm_status_cnt[tqm_status_idx] +=
+				per_ring_tx->tqm_status_cnt[tqm_status_idx];
+		}
+		for (htt_status_idx = 0; htt_status_idx < CDP_MAX_TX_HTT_STATUS; htt_status_idx++) {
+			total_tx->htt_status_cnt[htt_status_idx] +=
+				per_ring_tx->htt_status_cnt[htt_status_idx];
+		}
 		for (drop = 0; drop < TX_MAX_DROP; drop++)
 			total_tx->swdrop_cnt[drop] +=
 				per_ring_tx->swdrop_cnt[drop];
@@ -4170,7 +4179,7 @@ void dp_pdev_print_tid_stats(struct dp_pdev *pdev)
 {
 	struct cdp_tid_tx_stats total_tx;
 	struct cdp_tid_rx_stats total_rx;
-	uint8_t tid;
+	uint8_t tid, tqm_status_idx, htt_status_idx;
 
 	DP_PRINT_STATS("Packets received in hardstart: %llu ",
 		       pdev->stats.tid_stats.ingress_stack);
@@ -4182,9 +4191,24 @@ void dp_pdev_print_tid_stats(struct dp_pdev *pdev)
 		dp_accumulate_tid_stats(pdev, tid, &total_tx, &total_rx,
 					TID_COUNTER_STATS);
 		DP_PRINT_STATS("----TID: %d----", tid);
-		DP_PRINT_STATS("Tx Success Count: %llu", total_tx.success_cnt);
-		DP_PRINT_STATS("Tx Firmware Drop Count: %llu",
-			       total_tx.comp_fail_cnt);
+		DP_PRINT_STATS("Tx TQM Success Count: %llu",
+			       total_tx.tqm_status_cnt[HAL_TX_TQM_RR_FRAME_ACKED]);
+		DP_PRINT_STATS("Tx HTT Success Count: %llu",
+			       total_tx.htt_status_cnt[HTT_TX_FW2WBM_TX_STATUS_OK]);
+		for (tqm_status_idx = 1; tqm_status_idx < CDP_MAX_TX_TQM_STATUS; tqm_status_idx++) {
+			if (total_tx.tqm_status_cnt[tqm_status_idx]) {
+				DP_PRINT_STATS("Tx TQM Drop Count[%d]: %llu",
+					       tqm_status_idx,
+					       total_tx.tqm_status_cnt[tqm_status_idx]);
+			}
+		}
+		for (htt_status_idx = 1; htt_status_idx < CDP_MAX_TX_HTT_STATUS; htt_status_idx++) {
+			if (total_tx.htt_status_cnt[htt_status_idx]) {
+				DP_PRINT_STATS("Tx HTT Drop Count[%d]: %llu",
+					       htt_status_idx,
+					       total_tx.htt_status_cnt[htt_status_idx]);
+			}
+		}
 		DP_PRINT_STATS("Tx Hardware Drop Count: %llu",
 			       total_tx.swdrop_cnt[TX_HW_ENQUEUE]);
 		DP_PRINT_STATS("Tx Software Drop Count: %llu",

View File

@@ -2852,12 +2852,13 @@ dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
 			peer->stats.tx.dropped.fw_reason2 +
 			peer->stats.tx.dropped.fw_reason3;
 
-	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
-		tid_stats->comp_fail_cnt++;
-		return;
+	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
+		tid_stats->tqm_status_cnt[ts->status]++;
 	}
-	tid_stats->success_cnt++;
+
+	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
+		return;
+	}
 
 	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
@@ -3327,11 +3328,8 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
 	if (qdf_unlikely(pdev->delay_stats_flag))
 		dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
 
-	if (qdf_unlikely(tx_status != HTT_TX_FW2WBM_TX_STATUS_OK)) {
-		ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
-		tid_stats->comp_fail_cnt++;
-	} else {
-		tid_stats->success_cnt++;
+	if (tx_status < CDP_MAX_TX_HTT_STATUS) {
+		tid_stats->htt_status_cnt[tx_status]++;
 	}
 
 	peer = dp_peer_find_by_id(soc, ts.peer_id);

View File

@@ -256,6 +256,8 @@ enum hal_tx_encap_type {
  *				remove reason is fw_reason2
  * @HAL_TX_TQM_RR_FW_REASON3 : Remove command where fw indicated that
  *				remove reason is fw_reason3
+ * @HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE : Remove command where fw indicated that
+ *				remove reason is remove disable queue
  */
 enum hal_tx_tqm_release_reason {
 	HAL_TX_TQM_RR_FRAME_ACKED,
@@ -266,6 +268,7 @@ enum hal_tx_tqm_release_reason {
 	HAL_TX_TQM_RR_FW_REASON1,
 	HAL_TX_TQM_RR_FW_REASON2,
 	HAL_TX_TQM_RR_FW_REASON3,
+	HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE,
 };
 
 /* enum - Table IDs for 2 DSCP-TID mapping Tables that TCL H/W supports