qcacmn: Add support for rx stats from REO block

Add support for rx stats from REO block in IPA use case.

Change-Id: I90a6f57507838645dea9095615f378bd5a3a6ffc
CRs-Fixed: 3231322
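
The callback added below accumulates the REO queue statistics into a per-TID packet/byte counter on the peer. The counter field itself (rx_msdu_cnt) is added in the header portion of this change, which is not shown in these hunks; a minimal sketch of what it is assumed to look like, presumably a num/bytes pair similar to the driver's cdp_pkt_info:

/* Sketch only: assumed shape of the per-TID counter behind
 * rx_tid->rx_msdu_cnt; the real field is added to struct dp_rx_tid
 * elsewhere in this change.
 */
struct rx_msdu_cnt_sketch {
	uint64_t num;	/* MSDU count taken from the REO queue status */
	uint64_t bytes;	/* byte count taken from the REO queue status */
};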
This commit is contained in:
Amrit Sahai
2022-06-28 20:19:19 +05:30
Committed by Madan Koyyalamudi
Parent 08067d2b67
Commit 9ea6f8dc13
4 changed files with 191 additions and 8 deletions


@@ -2518,6 +2518,51 @@ map_detach:
}
#endif
#ifdef IPA_OFFLOAD
/**
 * dp_peer_update_tid_stats_from_reo() - update rx pkt and byte count from reo
 * @soc: soc handle
 * @cb_ctxt: combination of peer_id and tid
 * @reo_status: reo status
 *
 * Return: void
 */
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	unsigned long comb_peer_id_tid;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	uint16_t tid;
	uint16_t peer_id;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_err("REO stats failure %d\n",
		       queue_status->header.status);
		return;
	}

	comb_peer_id_tid = (unsigned long)cb_ctxt;
	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
	if (!peer)
		return;

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
		return;
	}

	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
}
qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
#endif
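
The callback above decodes cb_ctxt with DP_PEER_GET_REO_STATS_TID() and DP_PEER_GET_REO_STATS_PEER_ID(), whose definitions live in the header portion of this change and are not shown here. One packing consistent with the encode side in the second hunk (tid shifted above peer_id) would look roughly like:

/* Illustrative only: one encoding consistent with
 * "(i << DP_PEER_REO_STATS_TID_SHIFT) | peer_id" in the hunk below;
 * the real macro definitions are elsewhere in this change.
 */
#define DP_PEER_REO_STATS_TID_SHIFT	16	/* assumed width reserved for peer_id */
#define DP_PEER_GET_REO_STATS_TID(c) \
	((c) >> DP_PEER_REO_STATS_TID_SHIFT)
#define DP_PEER_GET_REO_STATS_PEER_ID(c) \
	((c) & ((1UL << DP_PEER_REO_STATS_TID_SHIFT) - 1))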
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status)
{
@@ -5571,6 +5616,59 @@ void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
}
#endif
#ifdef IPA_OFFLOAD
int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;
	int stats_cmd_sent_cnt = 0;
	QDF_STATUS status;
	uint16_t peer_id = peer->peer_id;

	if (!dp_stats_cmd_cb)
		return stats_cmd_sent_cnt;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.stats_params.clear = 1;
			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
						 &params, dp_stats_cmd_cb,
						 (void *)((i << DP_PEER_REO_STATS_TID_SHIFT)
							  | peer_id));

			if (QDF_IS_STATUS_SUCCESS(status))
				stats_cmd_sent_cnt++;

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging.
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
					NULL);
		}
	}

	return stats_cmd_sent_cnt;
}
qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
#endif
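
Taken together, an IPA-path caller is expected to hand the exported REO-status callback from the first hunk to dp_peer_get_rxtid_stats_ipa() and then read the per-TID counters it accumulates. A hedged sketch of such a caller (the helper name and the summation are illustrative, not part of this change):

/* Hypothetical caller, for illustration only: issue the queue-stats
 * commands and fold the per-TID counters accumulated by
 * dp_peer_update_tid_stats_from_reo() into peer-level totals.
 */
static void dp_peer_fetch_reo_rx_stats(struct dp_peer *peer,
				       uint64_t *tot_pkts, uint64_t *tot_bytes)
{
	int cmds_sent;
	int tid;

	*tot_pkts = 0;
	*tot_bytes = 0;

	/* One CMD_GET_QUEUE_STATS per active TID; status arrives asynchronously */
	cmds_sent = dp_peer_get_rxtid_stats_ipa(peer,
						dp_peer_update_tid_stats_from_reo);
	if (!cmds_sent)
		return;

	/* A real caller would wait for the REO status callbacks to run before
	 * reading; that synchronization is omitted from this sketch.
	 */
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		*tot_pkts += peer->rx_tid[tid].rx_msdu_cnt.num;
		*tot_bytes += peer->rx_tid[tid].rx_msdu_cnt.bytes;
	}
}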
/**
 * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
* @peer: DP peer handle