qcacmn: add periodic and host Tx/Rx stats support for WCN6450

Changes required to support periodic and host Tx/Rx stats for WCN6450.

Command to request and dump the host Tx/Rx stats:
iwpriv wlan0 txrx_stats <stats no> <mac_id>
mac_id: 0 - mac0 (5 GHz), 1 - mac1 (2 GHz)
        0 for single-MAC targets
stats no: 20 - TXRX_CLEAR_STATS
	  21 - TXRX_RX_RATE_STATS
	  22 - TXRX_TX_RATE_STATS
	  23 - TXRX_TX_HOST_STATS
	  24 - TXRX_RX_HOST_STATS
	  25 - TXRX_AST_STATS
	  26 - TXRX_SRNG_PTR_STATS
	  27 - TXRX_RX_MON_STATS
	  29 - TXRX_SOC_CFG_PARAMS
	  30 - TXRX_PDEV_CFG_PARAMS
	  31 - TXRX_NAPI_STATS
	  32 - TXRX_SOC_INTERRUPT_STATS
	  33 - TXRX_SOC_FSE_STATS
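
For example, on a single-MAC target (mac_id 0), illustrative invocations are:
  iwpriv wlan0 txrx_stats 31 0   (dump NAPI stats)
  iwpriv wlan0 txrx_stats 32 0   (dump SOC interrupt stats)
  iwpriv wlan0 txrx_stats 20 0   (clear the accumulated stats)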

Change-Id: Ibda4d531c9074a24f8c39916b44d9e3c38f189ee
CRs-Fixed: 3485279
Author: Venkateswara Naralasetty
Date: 2023-07-11 14:38:52 +05:30
Committed-by: Rahul Choudhary
Parent: 4e97268972
Commit: 51ddb93d21
7 files changed, 787 insertions(+), 366 deletions(-)

@@ -288,6 +288,7 @@ enum dp_fw_stats {
* dp_stats_mapping_table - Firmware and Host statistics
* currently supported
*/
#ifndef WLAN_SOFTUMAC_SUPPORT
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
@@ -332,6 +333,51 @@ const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID},
{TXRX_FW_STATS_INVALID, TXRX_PEER_STATS},
};
#else
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
/* Last ENUM for HTT FW STATS */
{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
};
#endif
/* MCL specific functions */
#if defined(DP_CON_MON)

@@ -6019,16 +6019,66 @@ dp_print_wbm2sw_ring_stats_from_hal(struct dp_pdev *pdev)
}
#endif
/*
* Format is:
* [0 18 1728, 1 15 1222, 2 24 1969,...]
* 2 character space for [ and ]
* 8 reo * 3 white space = 24
* 8 char space for reo rings
* 8 * 10 (uint32_t max value is 4294967295) = 80
* 8 * 20 (uint64_t max value is 18446744073709551615) = 160
* 8 commas
* 1 for \0
* Total of 283
*/
#define DP_STATS_STR_LEN 283
#ifndef WLAN_SOFTUMAC_SUPPORT
static int
dp_fill_rx_interrupt_ctx_stats(struct dp_intr *intr_ctx,
char *buf, int buf_len)
{
int i;
int pos = 0;
if (buf_len <= 0 || !buf) {
dp_err("incorrect buf or buf_len(%d)!", buf_len);
return pos;
}
for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
if (intr_ctx->intr_stats.num_rx_ring_masks[i])
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"reo[%u]:%u ", i,
intr_ctx->intr_stats.num_rx_ring_masks[i]);
}
return pos;
}
static int
dp_fill_tx_interrupt_ctx_stats(struct dp_intr *intr_ctx,
char *buf, int buf_len)
{
int i;
int pos = 0;
if (buf_len <= 0 || !buf) {
dp_err("incorrect buf or buf_len(%d)!", buf_len);
return pos;
}
for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
if (intr_ctx->intr_stats.num_tx_ring_masks[i])
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"tx_comps[%u]:%u ", i,
intr_ctx->intr_stats.num_tx_ring_masks[i]);
}
return pos;
}
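/*
* Illustrative only (made-up counts): for an interrupt context that has
* serviced REO ring 0 and TX completion ring 1, the helpers above append
* fragments such as "reo[0]:1024 " and "tx_comps[1]:512 " to the summary
* line assembled in dp_print_soc_interrupt_stats() below.
*/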
static inline void dp_print_umac_ring_stats(struct dp_pdev *pdev)
{
uint8_t i;
dp_print_ring_stat_from_hal(pdev->soc,
&pdev->soc->wbm_idle_link_ring,
@@ -6067,6 +6117,363 @@ dp_print_ring_stats(struct dp_pdev *pdev)
&pdev->soc->tcl_data_ring[i],
TCL_DATA);
dp_print_wbm2sw_ring_stats_from_hal(pdev);
}
static inline void dp_print_ce_ring_stats(struct dp_pdev *pdev) {}
static inline void dp_print_tx_ring_stats(struct dp_soc *soc)
{
uint8_t i;
for (i = 0; i < soc->num_tcl_data_rings; i++) {
DP_PRINT_STATS("Enqueue to SW2TCL%u: %u", i + 1,
soc->stats.tx.tcl_enq[i]);
DP_PRINT_STATS("TX completions reaped from ring %u: %u",
i, soc->stats.tx.tx_comp[i]);
}
}
static inline void dp_print_rx_ring_stats(struct dp_pdev *pdev)
{
uint8_t dp_stats_str[DP_STATS_STR_LEN] = {'\0'};
uint8_t *buf = dp_stats_str;
size_t pos = 0;
size_t buf_len = DP_STATS_STR_LEN;
uint8_t i;
pos += qdf_scnprintf(buf + pos, buf_len - pos, "%s", "REO/msdus/bytes [");
for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
if (!pdev->stats.rx.rcvd_reo[i].num)
continue;
pos += qdf_scnprintf(buf + pos, buf_len - pos,
"%d %llu %llu, ",
i, pdev->stats.rx.rcvd_reo[i].num,
pdev->stats.rx.rcvd_reo[i].bytes);
}
pos += qdf_scnprintf(buf + pos, buf_len - pos, "%s", "]");
DP_PRINT_STATS("%s", dp_stats_str);
}
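/*
* Illustrative only: with traffic on REO rings 0-2, the routine above
* prints a line of the form "REO/msdus/bytes [0 18 1728, 1 15 1222, 2 24 1969, ]",
* i.e. ring index, msdu count and byte count for each active ring.
*/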
static inline void
dp_print_rx_err_stats(struct dp_soc *soc, struct dp_pdev *pdev)
{
uint8_t error_code;
DP_PRINT_STATS("intra-bss EAPOL drops: %u",
soc->stats.rx.err.intrabss_eapol_drop);
DP_PRINT_STATS("mic errors %u",
pdev->stats.rx.err.mic_err);
DP_PRINT_STATS("Invalid peer on rx path: %llu",
pdev->soc->stats.rx.err.rx_invalid_peer.num);
DP_PRINT_STATS("sw_peer_id invalid %llu",
pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
DP_PRINT_STATS("packet_len invalid %llu",
pdev->soc->stats.rx.err.rx_invalid_pkt_len.num);
DP_PRINT_STATS("sa or da idx invalid %u",
pdev->soc->stats.rx.err.invalid_sa_da_idx);
DP_PRINT_STATS("defrag peer uninit %u",
pdev->soc->stats.rx.err.defrag_peer_uninit);
DP_PRINT_STATS("pkts delivered no peer %u",
pdev->soc->stats.rx.err.pkt_delivered_no_peer);
DP_PRINT_STATS("RX invalid cookie: %d",
soc->stats.rx.err.invalid_cookie);
DP_PRINT_STATS("RX stale cookie: %d",
soc->stats.rx.err.stale_cookie);
DP_PRINT_STATS("2k jump delba sent: %u",
pdev->soc->stats.rx.err.rx_2k_jump_delba_sent);
DP_PRINT_STATS("2k jump msdu to stack: %u",
pdev->soc->stats.rx.err.rx_2k_jump_to_stack);
DP_PRINT_STATS("2k jump msdu drop: %u",
pdev->soc->stats.rx.err.rx_2k_jump_drop);
DP_PRINT_STATS("REO err oor msdu to stack %u",
pdev->soc->stats.rx.err.reo_err_oor_to_stack);
DP_PRINT_STATS("REO err oor msdu drop: %u",
pdev->soc->stats.rx.err.reo_err_oor_drop);
DP_PRINT_STATS("Rx err msdu rejected: %d",
soc->stats.rx.err.rejected);
DP_PRINT_STATS("Rx raw frame dropped: %d",
soc->stats.rx.err.raw_frm_drop);
DP_PRINT_STATS("Rx stale link desc cookie: %d",
pdev->soc->stats.rx.err.invalid_link_cookie);
DP_PRINT_STATS("Rx nbuf sanity fails: %d",
pdev->soc->stats.rx.err.nbuf_sanity_fail);
DP_PRINT_STATS("Rx refill duplicate link desc: %d",
pdev->soc->stats.rx.err.dup_refill_link_desc);
DP_PRINT_STATS("Rx ipa smmu map duplicate: %d",
pdev->soc->stats.rx.err.ipa_smmu_map_dup);
DP_PRINT_STATS("Rx ipa smmu unmap duplicate: %d",
pdev->soc->stats.rx.err.ipa_smmu_unmap_dup);
DP_PRINT_STATS("Rx ipa smmu unmap no pipes: %d",
pdev->soc->stats.rx.err.ipa_unmap_no_pipe);
DP_PRINT_STATS("PN-in-Dest error frame pn-check fail: %d",
soc->stats.rx.err.pn_in_dest_check_fail);
DP_PRINT_STATS("Reo Statistics");
DP_PRINT_STATS("near_full: %u ", soc->stats.rx.near_full);
DP_PRINT_STATS("rbm error: %u msdus",
pdev->soc->stats.rx.err.invalid_rbm);
DP_PRINT_STATS("hal ring access fail: %u msdus",
pdev->soc->stats.rx.err.hal_ring_access_fail);
DP_PRINT_STATS("hal ring access full fail: %u msdus",
pdev->soc->stats.rx.err.hal_ring_access_full_fail);
for (error_code = 0; error_code < HAL_REO_ERR_MAX;
error_code++) {
if (!pdev->soc->stats.rx.err.reo_error[error_code])
continue;
DP_PRINT_STATS("Reo error number (%u): %u msdus",
error_code,
pdev->soc->stats.rx.err.reo_error[error_code]);
}
}
void dp_print_soc_tx_stats(struct dp_soc *soc)
{
uint8_t desc_pool_id;
soc->stats.tx.desc_in_use = 0;
DP_PRINT_STATS("SOC Tx Stats:\n");
for (desc_pool_id = 0;
desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
desc_pool_id++)
soc->stats.tx.desc_in_use +=
soc->tx_desc[desc_pool_id].num_allocated;
DP_PRINT_STATS("Tx Descriptors In Use = %u",
soc->stats.tx.desc_in_use);
DP_PRINT_STATS("Tx Invalid peer:");
DP_PRINT_STATS(" Packets = %llu",
soc->stats.tx.tx_invalid_peer.num);
DP_PRINT_STATS(" Bytes = %llu",
soc->stats.tx.tx_invalid_peer.bytes);
DP_PRINT_STATS("Packets dropped due to TCL ring full = %u %u %u %u",
soc->stats.tx.tcl_ring_full[0],
soc->stats.tx.tcl_ring_full[1],
soc->stats.tx.tcl_ring_full[2],
soc->stats.tx.tcl_ring_full[3]);
DP_PRINT_STATS("Tx invalid completion release = %u",
soc->stats.tx.invalid_release_source);
DP_PRINT_STATS("TX invalid Desc from completion ring = %u",
soc->stats.tx.invalid_tx_comp_desc);
DP_PRINT_STATS("Tx comp wbm internal error = %d : [%d %d %d %d]",
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_ALL],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED]);
DP_PRINT_STATS("Tx comp non wbm internal error = %d",
soc->stats.tx.non_wbm_internal_err);
DP_PRINT_STATS("Tx comp loop pkt limit hit = %d",
soc->stats.tx.tx_comp_loop_pkt_limit_hit);
DP_PRINT_STATS("Tx comp HP out of sync2 = %d",
soc->stats.tx.hp_oos2);
dp_print_tx_ppeds_stats(soc);
}
#define DP_INT_CTX_STATS_STRING_LEN 512
void dp_print_soc_interrupt_stats(struct dp_soc *soc)
{
char *buf;
char int_ctx_str[DP_INT_CTX_STATS_STRING_LEN] = {'\0'};
int i, pos, buf_len;
struct dp_intr_stats *intr_stats;
buf = int_ctx_str;
buf_len = DP_INT_CTX_STATS_STRING_LEN;
for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
pos = 0;
qdf_mem_zero(int_ctx_str, sizeof(int_ctx_str));
intr_stats = &soc->intr_ctx[i].intr_stats;
if (!intr_stats->num_masks && !intr_stats->num_near_full_masks)
continue;
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"%2u[%3d] - Total:%u ",
i,
hif_get_int_ctx_irq_num(soc->hif_handle,
i),
intr_stats->num_masks);
if (soc->intr_ctx[i].tx_ring_mask)
pos += dp_fill_tx_interrupt_ctx_stats(&soc->intr_ctx[i],
buf + pos,
buf_len - pos);
if (soc->intr_ctx[i].rx_ring_mask)
pos += dp_fill_rx_interrupt_ctx_stats(&soc->intr_ctx[i],
buf + pos,
buf_len - pos);
if (soc->intr_ctx[i].rx_err_ring_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"reo_err:%u ",
intr_stats->num_rx_err_ring_masks);
if (soc->intr_ctx[i].rx_wbm_rel_ring_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"wbm_rx_err:%u ",
intr_stats->num_rx_wbm_rel_ring_masks);
if (soc->intr_ctx[i].rxdma2host_ring_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"rxdma2_host_err:%u ",
intr_stats->num_rxdma2host_ring_masks);
if (soc->intr_ctx[i].rx_near_full_grp_1_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"rx_near_full_grp_1:%u ",
intr_stats->num_near_full_masks);
if (soc->intr_ctx[i].rx_near_full_grp_2_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"rx_near_full_grp_2:%u ",
intr_stats->num_near_full_masks);
if (soc->intr_ctx[i].tx_ring_near_full_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"tx_near_full:%u ",
intr_stats->num_near_full_masks);
dp_info("%s", int_ctx_str);
}
}
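/*
* Illustrative only (made-up counters): a populated interrupt context is
* logged as something like " 3[ 55] - Total:2048 tx_comps[0]:512 reo[3]:1500 reo_err:4 ",
* following the "%2u[%3d] - Total:%u " prefix plus the per-ring fragments
* appended above.
*/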
#else
static inline void dp_print_umac_ring_stats(struct dp_pdev *pdev) {}
static inline void dp_print_ce_ring_stats(struct dp_pdev *pdev)
{
hif_ce_print_ring_stats(pdev->soc->hif_handle);
}
static inline void dp_print_tx_ring_stats(struct dp_soc *soc)
{
uint8_t i;
for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
DP_PRINT_STATS("Enqueue to Tx ring %u: %u", i + 1,
soc->stats.tx.tcl_enq[i]);
DP_PRINT_STATS("TX completions reaped from ring %u: %u",
i, soc->stats.tx.tx_comp[i]);
}
}
static inline void dp_print_rx_ring_stats(struct dp_pdev *pdev)
{
uint8_t dp_stats_str[DP_STATS_STR_LEN] = {'\0'};
uint8_t *buf = dp_stats_str;
size_t pos = 0;
size_t buf_len = DP_STATS_STR_LEN;
uint8_t i;
pos += qdf_scnprintf(buf + pos, buf_len - pos, "%s", "RX/msdus/bytes [");
for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
if (!pdev->stats.rx.rcvd_reo[i].num)
continue;
pos += qdf_scnprintf(buf + pos, buf_len - pos,
"%d %llu %llu, ",
i, pdev->stats.rx.rcvd_reo[i].num,
pdev->stats.rx.rcvd_reo[i].bytes);
}
pos += qdf_scnprintf(buf + pos, buf_len - pos, "%s", "]");
DP_PRINT_STATS("%s", dp_stats_str);
}
static inline void
dp_print_rx_err_stats(struct dp_soc *soc, struct dp_pdev *pdev)
{
DP_PRINT_STATS("intra-bss EAPOL drops: %u",
soc->stats.rx.err.intrabss_eapol_drop);
DP_PRINT_STATS("mic errors %u",
pdev->stats.rx.err.mic_err);
DP_PRINT_STATS("2k jump msdu drop: %u",
pdev->soc->stats.rx.err.rx_2k_jump_drop);
DP_PRINT_STATS("REO err oor msdu drop: %u",
pdev->soc->stats.rx.err.reo_err_oor_drop);
DP_PRINT_STATS("Invalid peer on rx path: %llu",
pdev->soc->stats.rx.err.rx_invalid_peer.num);
DP_PRINT_STATS("sw_peer_id invalid %llu",
pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
DP_PRINT_STATS("packet_len invalid %llu",
pdev->soc->stats.rx.err.rx_invalid_pkt_len.num);
DP_PRINT_STATS("sa or da idx invalid %u",
pdev->soc->stats.rx.err.invalid_sa_da_idx);
DP_PRINT_STATS("defrag peer uninit %u",
pdev->soc->stats.rx.err.defrag_peer_uninit);
DP_PRINT_STATS("pkts delivered no peer %u",
pdev->soc->stats.rx.err.pkt_delivered_no_peer);
DP_PRINT_STATS("RX invalid cookie: %d",
soc->stats.rx.err.invalid_cookie);
DP_PRINT_STATS("RX stale cookie: %d",
soc->stats.rx.err.stale_cookie);
DP_PRINT_STATS("Rx err msdu rejected: %d",
soc->stats.rx.err.rejected);
DP_PRINT_STATS("Rx raw frame dropped: %d",
soc->stats.rx.err.raw_frm_drop);
DP_PRINT_STATS("Rx nbuf sanity fails: %d",
pdev->soc->stats.rx.err.nbuf_sanity_fail);
DP_PRINT_STATS("PN-in-Dest error frame pn-check fail: %d",
soc->stats.rx.err.pn_in_dest_check_fail);
}
void dp_print_soc_tx_stats(struct dp_soc *soc)
{
uint8_t desc_pool_id;
soc->stats.tx.desc_in_use = 0;
DP_PRINT_STATS("SOC Tx Stats:\n");
for (desc_pool_id = 0;
desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
desc_pool_id++)
soc->stats.tx.desc_in_use +=
soc->tx_desc[desc_pool_id].num_allocated;
DP_PRINT_STATS("Tx Descriptors In Use = %u",
soc->stats.tx.desc_in_use);
DP_PRINT_STATS("Tx Invalid peer:");
DP_PRINT_STATS(" Packets = %llu",
soc->stats.tx.tx_invalid_peer.num);
DP_PRINT_STATS(" Bytes = %llu",
soc->stats.tx.tx_invalid_peer.bytes);
DP_PRINT_STATS("Packets dropped due to Tx ring full = %u",
soc->stats.tx.tcl_ring_full[0]);
DP_PRINT_STATS("Tx invalid completion release = %u",
soc->stats.tx.invalid_release_source);
DP_PRINT_STATS("TX invalid Desc from completion ring = %u",
soc->stats.tx.invalid_tx_comp_desc);
dp_print_tx_ppeds_stats(soc);
}
/* TODO: print CE intr stats? */
void dp_print_soc_interrupt_stats(struct dp_soc *soc) {}
#endif
void
dp_print_ring_stats(struct dp_pdev *pdev)
{
struct dp_soc *soc = pdev->soc;
uint32_t i;
int mac_id;
int lmac_id;
if (hif_rtpm_get(HIF_RTPM_GET_SYNC, HIF_RTPM_ID_DP_RING_STATS))
return;
dp_print_ce_ring_stats(pdev);
dp_print_umac_ring_stats(pdev);
if (pdev->soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
for (i = 0; i < pdev->soc->num_rx_refill_buf_rings; i++) {
@@ -7275,7 +7682,7 @@ void dp_print_per_ring_stats(struct dp_soc *soc)
uint16_t core;
uint64_t total_packets;
DP_PRINT_STATS("Reo packets per ring:");
DP_PRINT_STATS("Rx packets per ring:");
for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
total_packets = 0;
DP_PRINT_STATS("Packets on ring %u:", ring);
@@ -7328,19 +7735,6 @@ static void dp_pdev_print_tx_rx_rates(struct dp_pdev *pdev)
qdf_mem_free(vdev_array);
}
/*
* Format is:
* [0 18 1728, 1 15 1222, 2 24 1969,...]
* 2 character space for [ and ]
* 8 reo * 3 white space = 24
* 8 char space for reo rings
* 8 * 10 (uint32_t max value is 4294967295) = 80
* 8 * 20 (uint64_t max value is 18446744073709551615) = 160
* 8 commas
* 1 for \0
* Total of 283
*/
#define DP_STATS_STR_LEN 283
void dp_txrx_path_stats(struct dp_soc *soc)
{
uint8_t error_code;
@@ -7369,12 +7763,8 @@ void dp_txrx_path_stats(struct dp_soc *soc)
DP_PRINT_STATS("successfully transmitted: %llu msdus (%llu bytes)",
pdev->stats.tx.tx_success.num,
pdev->stats.tx.tx_success.bytes);
for (i = 0; i < soc->num_tcl_data_rings; i++) {
DP_PRINT_STATS("Enqueue to SW2TCL%u: %u", i + 1,
soc->stats.tx.tcl_enq[i]);
DP_PRINT_STATS("TX completions reaped from ring %u: %u",
i, soc->stats.tx.tx_comp[i]);
}
dp_print_tx_ring_stats(soc);
DP_PRINT_STATS("Invalid release source: %u",
soc->stats.tx.invalid_release_source);
@@ -7455,19 +7845,7 @@ void dp_txrx_path_stats(struct dp_soc *soc)
pdev->stats.rx.to_stack.num,
pdev->stats.rx.to_stack.bytes);
pos = 0;
pos += qdf_scnprintf(buf + pos, buf_len - pos, "%s", "REO/msdus/bytes [");
for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
if (!pdev->stats.rx.rcvd_reo[i].num)
continue;
pos += qdf_scnprintf(buf + pos, buf_len - pos,
"%d %llu %llu, ",
i, pdev->stats.rx.rcvd_reo[i].num,
pdev->stats.rx.rcvd_reo[i].bytes);
}
pos += qdf_scnprintf(buf + pos, buf_len - pos, "%s", "]");
DP_PRINT_STATS("%s", dp_stats_str);
dp_print_rx_ring_stats(pdev);
for (i = 0; i < CDP_MAX_LMACS; i++)
DP_PRINT_STATS("received on lmac[%d] %llu msdus (%llu bytes)",
@@ -7481,80 +7859,14 @@ void dp_txrx_path_stats(struct dp_soc *soc)
pdev->stats.rx.intra_bss.fail.bytes);
DP_PRINT_STATS("intra-bss no mdns fwds %u msdus",
pdev->stats.rx.intra_bss.mdns_no_fwd);
DP_PRINT_STATS("intra-bss EAPOL drops: %u",
soc->stats.rx.err.intrabss_eapol_drop);
DP_PRINT_STATS("raw packets %llu msdus ( %llu bytes)",
pdev->stats.rx.raw.num,
pdev->stats.rx.raw.bytes);
DP_PRINT_STATS("mic errors %u",
pdev->stats.rx.err.mic_err);
DP_PRINT_STATS("Invalid peer on rx path: %llu",
pdev->soc->stats.rx.err.rx_invalid_peer.num);
DP_PRINT_STATS("sw_peer_id invalid %llu",
pdev->soc->stats.rx.err.rx_invalid_peer_id.num);
DP_PRINT_STATS("packet_len invalid %llu",
pdev->soc->stats.rx.err.rx_invalid_pkt_len.num);
DP_PRINT_STATS("sa or da idx invalid %u",
pdev->soc->stats.rx.err.invalid_sa_da_idx);
DP_PRINT_STATS("defrag peer uninit %u",
pdev->soc->stats.rx.err.defrag_peer_uninit);
DP_PRINT_STATS("pkts delivered no peer %u",
pdev->soc->stats.rx.err.pkt_delivered_no_peer);
DP_PRINT_STATS("RX invalid cookie: %d",
soc->stats.rx.err.invalid_cookie);
DP_PRINT_STATS("RX stale cookie: %d",
soc->stats.rx.err.stale_cookie);
DP_PRINT_STATS("2k jump delba sent: %u",
pdev->soc->stats.rx.err.rx_2k_jump_delba_sent);
DP_PRINT_STATS("2k jump msdu to stack: %u",
pdev->soc->stats.rx.err.rx_2k_jump_to_stack);
DP_PRINT_STATS("2k jump msdu drop: %u",
pdev->soc->stats.rx.err.rx_2k_jump_drop);
DP_PRINT_STATS("REO err oor msdu to stack %u",
pdev->soc->stats.rx.err.reo_err_oor_to_stack);
DP_PRINT_STATS("REO err oor msdu drop: %u",
pdev->soc->stats.rx.err.reo_err_oor_drop);
DP_PRINT_STATS("Rx err msdu rejected: %d",
soc->stats.rx.err.rejected);
DP_PRINT_STATS("Rx raw frame dropped: %d",
soc->stats.rx.err.raw_frm_drop);
DP_PRINT_STATS("Rx stale link desc cookie: %d",
pdev->soc->stats.rx.err.invalid_link_cookie);
DP_PRINT_STATS("Rx nbuf sanity fails: %d",
pdev->soc->stats.rx.err.nbuf_sanity_fail);
DP_PRINT_STATS("Rx refill duplicate link desc: %d",
pdev->soc->stats.rx.err.dup_refill_link_desc);
DP_PRINT_STATS("Rx ipa smmu map duplicate: %d",
pdev->soc->stats.rx.err.ipa_smmu_map_dup);
DP_PRINT_STATS("Rx ipa smmu unmap duplicate: %d",
pdev->soc->stats.rx.err.ipa_smmu_unmap_dup);
DP_PRINT_STATS("Rx ipa smmu unmap no pipes: %d",
pdev->soc->stats.rx.err.ipa_unmap_no_pipe);
DP_PRINT_STATS("PN-in-Dest error frame pn-check fail: %d",
soc->stats.rx.err.pn_in_dest_check_fail);
DP_PRINT_STATS("Reo Statistics");
DP_PRINT_STATS("near_full: %u ", soc->stats.rx.near_full);
DP_PRINT_STATS("rbm error: %u msdus",
pdev->soc->stats.rx.err.invalid_rbm);
DP_PRINT_STATS("hal ring access fail: %u msdus",
pdev->soc->stats.rx.err.hal_ring_access_fail);
DP_PRINT_STATS("hal ring access full fail: %u msdus",
pdev->soc->stats.rx.err.hal_ring_access_full_fail);
DP_PRINT_STATS("Rx BAR frames:%d", soc->stats.rx.bar_frame);
for (error_code = 0; error_code < HAL_REO_ERR_MAX;
error_code++) {
if (!pdev->soc->stats.rx.err.reo_error[error_code])
continue;
DP_PRINT_STATS("Reo error number (%u): %u msdus",
error_code,
pdev->soc->stats.rx.err
.reo_error[error_code]);
}
dp_print_rx_err_stats(soc, pdev);
for (error_code = 0; error_code < HAL_RXDMA_ERR_MAX;
error_code++) {
@@ -8001,94 +8313,6 @@ void dp_print_global_desc_count(void)
}
#endif
void
dp_print_soc_tx_stats(struct dp_soc *soc)
{
uint8_t desc_pool_id;
soc->stats.tx.desc_in_use = 0;
DP_PRINT_STATS("SOC Tx Stats:\n");
for (desc_pool_id = 0;
desc_pool_id < wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
desc_pool_id++)
soc->stats.tx.desc_in_use +=
soc->tx_desc[desc_pool_id].num_allocated;
DP_PRINT_STATS("Tx Descriptors In Use = %u",
soc->stats.tx.desc_in_use);
DP_PRINT_STATS("Tx Invalid peer:");
DP_PRINT_STATS(" Packets = %llu",
soc->stats.tx.tx_invalid_peer.num);
DP_PRINT_STATS(" Bytes = %llu",
soc->stats.tx.tx_invalid_peer.bytes);
DP_PRINT_STATS("Packets dropped due to TCL ring full = %u %u %u %u",
soc->stats.tx.tcl_ring_full[0],
soc->stats.tx.tcl_ring_full[1],
soc->stats.tx.tcl_ring_full[2],
soc->stats.tx.tcl_ring_full[3]);
DP_PRINT_STATS("Tx invalid completion release = %u",
soc->stats.tx.invalid_release_source);
DP_PRINT_STATS("TX invalid Desc from completion ring = %u",
soc->stats.tx.invalid_tx_comp_desc);
DP_PRINT_STATS("Tx comp wbm internal error = %d : [%d %d %d %d]",
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_ALL],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
soc->stats.tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED]);
DP_PRINT_STATS("Tx comp non wbm internal error = %d",
soc->stats.tx.non_wbm_internal_err);
DP_PRINT_STATS("Tx comp loop pkt limit hit = %d",
soc->stats.tx.tx_comp_loop_pkt_limit_hit);
DP_PRINT_STATS("Tx comp HP out of sync2 = %d",
soc->stats.tx.hp_oos2);
dp_print_tx_ppeds_stats(soc);
}
static
int dp_fill_rx_interrupt_ctx_stats(struct dp_intr *intr_ctx,
char *buf, int buf_len)
{
int i;
int pos = 0;
if (buf_len <= 0 || !buf) {
dp_err("incorrect buf or buf_len(%d)!", buf_len);
return pos;
}
for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
if (intr_ctx->intr_stats.num_rx_ring_masks[i])
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"reo[%u]:%u ", i,
intr_ctx->intr_stats.num_rx_ring_masks[i]);
}
return pos;
}
static
int dp_fill_tx_interrupt_ctx_stats(struct dp_intr *intr_ctx,
char *buf, int buf_len)
{
int i;
int pos = 0;
if (buf_len <= 0 || !buf) {
dp_err("incorrect buf or buf_len(%d)!", buf_len);
return pos;
}
for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
if (intr_ctx->intr_stats.num_tx_ring_masks[i])
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"tx_comps[%u]:%u ", i,
intr_ctx->intr_stats.num_tx_ring_masks[i]);
}
return pos;
}
#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
#define DP_SRNG_HIGH_WM_STATS_STRING_LEN 512
void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask)
@@ -8119,81 +8343,6 @@ void dp_dump_srng_high_wm_stats(struct dp_soc *soc, uint64_t srng_mask)
}
#endif
#define DP_INT_CTX_STATS_STRING_LEN 512
void dp_print_soc_interrupt_stats(struct dp_soc *soc)
{
char *buf;
char int_ctx_str[DP_INT_CTX_STATS_STRING_LEN] = {'\0'};
int i, pos, buf_len;
struct dp_intr_stats *intr_stats;
buf = int_ctx_str;
buf_len = DP_INT_CTX_STATS_STRING_LEN;
for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
pos = 0;
qdf_mem_zero(int_ctx_str, sizeof(int_ctx_str));
intr_stats = &soc->intr_ctx[i].intr_stats;
if (!intr_stats->num_masks && !intr_stats->num_near_full_masks)
continue;
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"%2u[%3d] - Total:%u ",
i,
hif_get_int_ctx_irq_num(soc->hif_handle,
i),
intr_stats->num_masks);
if (soc->intr_ctx[i].tx_ring_mask)
pos += dp_fill_tx_interrupt_ctx_stats(&soc->intr_ctx[i],
buf + pos,
buf_len - pos);
if (soc->intr_ctx[i].rx_ring_mask)
pos += dp_fill_rx_interrupt_ctx_stats(&soc->intr_ctx[i],
buf + pos,
buf_len - pos);
if (soc->intr_ctx[i].rx_err_ring_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"reo_err:%u ",
intr_stats->num_rx_err_ring_masks);
if (soc->intr_ctx[i].rx_wbm_rel_ring_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"wbm_rx_err:%u ",
intr_stats->num_rx_wbm_rel_ring_masks);
if (soc->intr_ctx[i].rxdma2host_ring_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"rxdma2_host_err:%u ",
intr_stats->num_rxdma2host_ring_masks);
if (soc->intr_ctx[i].rx_near_full_grp_1_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"rx_near_full_grp_1:%u ",
intr_stats->num_near_full_masks);
if (soc->intr_ctx[i].rx_near_full_grp_2_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"rx_near_full_grp_2:%u ",
intr_stats->num_near_full_masks);
if (soc->intr_ctx[i].tx_ring_near_full_mask)
pos += qdf_scnprintf(buf + pos,
buf_len - pos,
"tx_near_full:%u ",
intr_stats->num_near_full_masks);
dp_info("%s", int_ctx_str);
}
}
void
dp_print_soc_rx_stats(struct dp_soc *soc)
{

@@ -623,11 +623,6 @@ void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
hal_debug("REO2SW3: %s",
hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
}
#else
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
}
#endif
void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
@@ -647,6 +642,16 @@ void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
}
#else
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
}
/* TODO: Need separate logic for Evros */
void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
}
#endif
int hal_get_reg_write_pending_work(void *hal_soc)
{

@@ -2949,4 +2949,5 @@ static inline void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
{
}
#endif
void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx);
#endif /* _HIF_H_ */

@@ -1645,6 +1645,50 @@ static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
else
QDF_BUG(0);
}
/**
* hif_ce_print_ring_stats() - Print CE ring statistics
* @hif_ctx: hif context
*
* Return: None
*/
void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct CE_state *ce_state;
int i;
for (i = 0; i < scn->ce_count; i++) {
ce_state = scn->ce_id_to_state[i];
if (!ce_state)
continue;
if (ce_state->src_ring) {
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"ce%d:SW: sw_index %u write_index %u",
i, ce_state->src_ring->sw_index,
ce_state->src_ring->write_index);
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"ce%d:HW: read_index %u write_index %u",
i, CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr),
CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr));
}
if (ce_state->dest_ring) {
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"ce%d:SW: sw_index %u write_index %u",
i, ce_state->dest_ring->sw_index,
ce_state->dest_ring->write_index);
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"ce%d:HW: read_index %u write_index %u",
i, CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr),
CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr));
}
}
}
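/*
* Illustrative only (made-up indices): for CE 3 the loop above emits
* "ce3:SW: sw_index 120 write_index 124" followed by
* "ce3:HW: read_index 120 write_index 124" for each populated ring.
*/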
#endif
/**

@@ -197,6 +197,7 @@ void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
#ifndef QCA_WIFI_WCN6450
/**
* hif_print_napi_latency_stats() - print NAPI scheduling latency stats
* @hif_state: hif context
@@ -301,6 +302,97 @@ static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
"%u|", stats->poll_time_buckets[i]);
}
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
struct hif_exec_context *hif_ext_group;
struct qca_napi_stat *napi_stats;
int i, j;
/*
* Max value of uint32_t (poll_time_bucket) = 4294967295
* Thus we need 10 chars + 1 space = 11 chars for each bucket value,
* plus 1 char for '\0'.
*/
char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
"NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");
for (i = 0;
(i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
i++) {
hif_ext_group = hif_state->hif_ext_group[i];
for (j = 0; j < num_possible_cpus(); j++) {
napi_stats = &hif_ext_group->stats[j];
if (!napi_stats->napi_schedules)
continue;
hif_get_poll_times_hist_str(napi_stats,
hist_str,
sizeof(hist_str));
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_INFO_HIGH,
"NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
i, j,
napi_stats->napi_schedules,
napi_stats->napi_polls,
napi_stats->napi_completes,
napi_stats->napi_workdone,
napi_stats->time_limit_reached,
qdf_do_div(napi_stats->napi_max_poll_time,
1000),
hist_str);
}
}
hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#else
static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
uint8_t buf_len)
{
}
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
struct hif_exec_context *hif_ext_group;
struct qca_napi_stat *napi_stats;
int i, j;
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
for (i = 0; i < hif_state->hif_num_extgroup; i++) {
if (hif_state->hif_ext_group[i]) {
hif_ext_group = hif_state->hif_ext_group[i];
for (j = 0; j < num_possible_cpus(); j++) {
napi_stats = &(hif_ext_group->stats[j]);
if (napi_stats->napi_schedules != 0)
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_FATAL,
"NAPI[%2d]CPU[%d]: "
"%7d %7d %7d %7d ",
i, j,
napi_stats->napi_schedules,
napi_stats->napi_polls,
napi_stats->napi_completes,
napi_stats->napi_workdone);
}
}
}
hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
#endif /* QCA_WIFI_WCN6450 */
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
* hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
* @hif_ext_group: hif_ext_group of type NAPI
@@ -393,63 +485,7 @@ void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
hif_ext_group->poll_start_time = qdf_time_sched_clock();
}
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
struct hif_exec_context *hif_ext_group;
struct qca_napi_stat *napi_stats;
int i, j;
/*
* Max value of uint_32 (poll_time_bucket) = 4294967295
* Thus we need 10 chars + 1 space =11 chars for each bucket value.
* +1 space for '\0'.
*/
char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
"NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");
for (i = 0;
(i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
i++) {
hif_ext_group = hif_state->hif_ext_group[i];
for (j = 0; j < num_possible_cpus(); j++) {
napi_stats = &hif_ext_group->stats[j];
if (!napi_stats->napi_schedules)
continue;
hif_get_poll_times_hist_str(napi_stats,
hist_str,
sizeof(hist_str));
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_INFO_HIGH,
"NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
i, j,
napi_stats->napi_schedules,
napi_stats->napi_polls,
napi_stats->napi_completes,
napi_stats->napi_workdone,
napi_stats->time_limit_reached,
qdf_do_div(napi_stats->napi_max_poll_time,
1000),
hist_str);
}
}
hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#else
static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
uint8_t buf_len)
{
}
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
@@ -459,39 +495,6 @@ static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
struct hif_exec_context *hif_ext_group;
struct qca_napi_stat *napi_stats;
int i, j;
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
for (i = 0; i < hif_state->hif_num_extgroup; i++) {
if (hif_state->hif_ext_group[i]) {
hif_ext_group = hif_state->hif_ext_group[i];
for (j = 0; j < num_possible_cpus(); j++) {
napi_stats = &(hif_ext_group->stats[j]);
if (napi_stats->napi_schedules != 0)
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_FATAL,
"NAPI[%2d]CPU[%d]: "
"%7d %7d %7d %7d ",
i, j,
napi_stats->napi_schedules,
napi_stats->napi_polls,
napi_stats->napi_completes,
napi_stats->napi_workdone);
}
}
}
hif_print_napi_latency_stats(hif_state);
}
qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)

@@ -749,7 +749,7 @@ inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
hif_irq_enable(scn, NAPI_ID2PIPE(id));
}
#ifdef HIF_LATENCY_PROFILE_ENABLE
#if defined(QCA_WIFI_WCN6450) && defined(HIF_LATENCY_PROFILE_ENABLE)
/*
* hif_napi_latency_profile_start() - update the schedule start timestamp
*
@@ -807,6 +807,42 @@ static void hif_napi_latency_profile_measure(struct qca_napi_info *napi_info)
else
napi_info->sched_latency_stats[7]++;
}
static void hif_print_napi_latency_stats(struct qca_napi_info *napii, int ce_id)
{
int i;
int64_t cur_tstamp;
const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
"0-2 ms",
"3-10 ms",
"11-20 ms",
"21-50 ms",
"51-100 ms",
"101-250 ms",
"251-500 ms",
"> 500 ms"
};
cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
"Current timestamp: %lld", cur_tstamp);
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
"ce id %d Last serviced timestamp: %lld",
ce_id, napii->tstamp);
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
"Latency Bucket | Time elapsed");
for (i = 0; i < HIF_SCHED_LATENCY_BUCKETS; i++)
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_INFO_HIGH,
"%s | %lld",
time_str[i],
napii->sched_latency_stats[i]);
}
#else
static inline void
hif_napi_latency_profile_start(struct hif_softc *scn, int ce_id)
@@ -817,8 +853,14 @@ static inline void
hif_napi_latency_profile_measure(struct qca_napi_info *napi_info)
{
}
static inline void
hif_print_napi_latency_stats(struct qca_napi_info *napii, int ce_id)
{
}
#endif
#ifdef QCA_WIFI_WCN6450
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
* hif_napi_update_service_start_time() - Update NAPI poll start time
@@ -868,6 +910,137 @@ static void hif_napi_fill_poll_time_histogram(struct qca_napi_info *napi_info)
++napi_stat->poll_time_buckets[bucket];
}
/*
* hif_get_poll_times_hist_str() - Get HIF poll times histogram string
* @stats: NAPI stats to get poll time buckets
* @buf: buffer to fill histogram string
* @buf_len: length of the buffer
*
* Return: void
*/
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
uint8_t buf_len)
{
int i;
int str_index = 0;
for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
"%u|", stats->poll_time_buckets[i]);
}
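/*
* Illustrative only: the helper above emits one "%u|" entry per 500us
* poll-time bucket (QCA_NAPI_NUM_BUCKETS of them), e.g. "12|3|0|0|...",
* which feeds the "hist(500us buckets)" column printed below.
*/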
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct qca_napi_info *napii;
struct qca_napi_stat *napi_stats;
int ce_id, cpu;
/*
* Max value of uint32_t (poll_time_bucket) = 4294967295
* Thus we need 10 chars + 1 space = 11 chars for each bucket value,
* plus 1 char for '\0'.
*/
char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
"NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");
for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
if (!hif_napi_enabled(hif_ctx, ce_id))
continue;
napii = scn->napi_data.napis[ce_id];
if (napii) {
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
napi_stats = &napii->stats[cpu];
hif_get_poll_times_hist_str(napi_stats,
hist_str,
sizeof(hist_str));
if (napi_stats->napi_schedules != 0)
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_INFO_HIGH,
"NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
ce_id, cpu,
napi_stats->napi_schedules,
napi_stats->napi_polls,
napi_stats->napi_completes,
napi_stats->napi_workdone,
napi_stats->time_limit_reached,
qdf_do_div(napi_stats->napi_max_poll_time, 1000),
hist_str);
}
hif_print_napi_latency_stats(napii, ce_id);
}
}
}
#else
static inline void
hif_napi_update_service_start_time(struct qca_napi_info *napi_info)
{
}
static inline void
hif_napi_fill_poll_time_histogram(struct qca_napi_info *napi_info)
{
}
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct qca_napi_info *napii;
struct qca_napi_stat *napi_stats;
int ce_id, cpu;
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
if (!hif_napi_enabled(hif_ctx, ce_id))
continue;
napii = scn->napi_data.napis[ce_id];
if (napii) {
for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
napi_stats = &napii->stats[cpu];
if (napi_stats->napi_schedules != 0)
QDF_TRACE(QDF_MODULE_ID_HIF,
QDF_TRACE_LEVEL_FATAL,
"NAPI[%2d]CPU[%d]: "
"%7d %7d %7d %7d ",
ce_id, cpu,
napi_stats->napi_schedules,
napi_stats->napi_polls,
napi_stats->napi_completes,
napi_stats->napi_workdone);
}
hif_print_napi_latency_stats(napii, ce_id);
}
}
}
#endif
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct qca_napi_info *napii;
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
if (!hif_napi_enabled(hif_ctx, ce_id))
continue;
napii = scn->napi_data.napis[ce_id];
if (napii)
qdf_mem_set(napii->sched_latency_stats,
sizeof(napii->sched_latency_stats), 0);
}
}
#else
static inline void
hif_napi_update_service_start_time(struct qca_napi_info *napi_info)