qcacmn: Optimize RX hw stats flow

Only 8 RX data TIDs are in use, so the HW stats query only needs to
update the info for those TIDs, which shortens command execution time.
Reduce the number of TIDs queried and issue a single REO flush command
to flush the entire REO cache instead of one flush per TID.

Change-Id: Ib642451b9f07f62198126c45644742e1908280cd
CRs-Fixed: 3325964
This commit is contained in:
Author: Yu Tian
Date: 2022-11-07 22:27:21 -08:00
Committed by: Madan Koyyalamudi
Parent: 5d0b7ea33f
Commit: 5a6d2c2374
4 changed files with 84 additions and 31 deletions


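For reference, the TID filter this change introduces can be shown as a
minimal standalone sketch. The constant values below are assumptions for
illustration only (the real DP_MAX_TIDS, CDP_DATA_TID_MAX and
CDP_DATA_NON_QOS_TID definitions live in the cdp/dp headers):

#include <stdio.h>

/* Assumed illustrative values; the actual definitions come from the
 * cdp/dp headers of the driver.
 */
#define DP_MAX_TIDS           17
#define CDP_DATA_TID_MAX       8
#define CDP_DATA_NON_QOS_TID  16

int main(void)
{
	int i;

	/* Same filter as the patch: only the 8 data TIDs plus the
	 * non-QoS TID are queried; all other TIDs are skipped.
	 */
	for (i = 0; i < DP_MAX_TIDS; i++) {
		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
			continue;
		printf("TID %d: stats query sent\n", i);
	}
	return 0;
}

With the assumed values, the query loop touches TIDs 0-7 plus TID 16
instead of every entry up to DP_MAX_TIDS, which is where the saving in
command execution time comes from.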
@@ -5867,14 +5867,18 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
 	QDF_STATUS status;
 	uint16_t peer_id = peer->peer_id;
 	unsigned long comb_peer_id_tid;
+	struct dp_rx_tid *rx_tid;
+	int last_tid = 0;
 
 	if (!dp_stats_cmd_cb)
 		return stats_cmd_sent_cnt;
 
 	qdf_mem_zero(&params, sizeof(params));
 	for (i = 0; i < DP_MAX_TIDS; i++) {
-		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
+			continue;
+
+		rx_tid = &peer->rx_tid[i];
 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
 			params.std.need_status = 1;
 			params.std.addr_lo =
@@ -5887,24 +5891,38 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
 						 &params, dp_stats_cmd_cb,
 						 (void *)comb_peer_id_tid);
-			if (QDF_IS_STATUS_SUCCESS(status))
+			if (QDF_IS_STATUS_SUCCESS(status)) {
 				stats_cmd_sent_cnt++;
-
-			/* Flush REO descriptor from HW cache to update stats
-			 * in descriptor memory. This is to help debugging
-			 */
-			qdf_mem_zero(&params, sizeof(params));
-			params.std.need_status = 0;
-			params.std.addr_lo =
-				rx_tid->hw_qdesc_paddr & 0xffffffff;
-			params.std.addr_hi =
-				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
-			params.u.fl_cache_params.flush_no_inval = 1;
-			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
-					NULL);
+				last_tid = i;
+			}
 		}
 	}
 
+	/* Flush REO descriptor from HW cache to update stats
+	 * in descriptor memory. This is to help debugging
+	 */
+	rx_tid = &peer->rx_tid[last_tid];
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.fl_cache_params.flush_no_inval = 1;
+	params.u.fl_cache_params.flush_entire_cache = 1;
+	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
+			NULL);
+
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
+	dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
+			NULL);
+
 	return stats_cmd_sent_cnt;
 }
@@ -5928,13 +5946,18 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
 	int i;
 	int stats_cmd_sent_cnt = 0;
 	QDF_STATUS status;
+	struct dp_rx_tid *rx_tid;
+	int last_tid = 0;
 
 	if (!dp_stats_cmd_cb)
 		return stats_cmd_sent_cnt;
 
 	qdf_mem_zero(&params, sizeof(params));
 	for (i = 0; i < DP_MAX_TIDS; i++) {
-		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
+			continue;
+
+		rx_tid = &peer->rx_tid[i];
 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
 			params.std.need_status = 1;
 			params.std.addr_lo =
@@ -5954,23 +5977,38 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
 						rx_tid);
 			}
 
-			if (QDF_IS_STATUS_SUCCESS(status))
+			if (QDF_IS_STATUS_SUCCESS(status)) {
 				stats_cmd_sent_cnt++;
-
-			/* Flush REO descriptor from HW cache to update stats
-			 * in descriptor memory. This is to help debugging */
-			qdf_mem_zero(&params, sizeof(params));
-			params.std.need_status = 0;
-			params.std.addr_lo =
-				rx_tid->hw_qdesc_paddr & 0xffffffff;
-			params.std.addr_hi =
-				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
-			params.u.fl_cache_params.flush_no_inval = 1;
-			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
-					NULL);
+				last_tid = i;
+			}
 		}
 	}
 
+	/* Flush REO descriptor from HW cache to update stats
+	 * in descriptor memory. This is to help debugging
+	 */
+	rx_tid = &peer->rx_tid[last_tid];
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.fl_cache_params.flush_no_inval = 1;
+	params.u.fl_cache_params.flush_entire_cache = 1;
+	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
+			NULL);
+
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
+	dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
+			NULL);
+
 	return stats_cmd_sent_cnt;
 }
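To show the shape of the new tail sequence in isolation, here is a
self-contained sketch with stand-in types. The enum, struct and
send_reo_cmd() stub below are illustrative only; the real definitions
and dp_reo_send_cmd() come from the hal/dp layers. One CMD_FLUSH_CACHE
with flush_entire_cache set replaces the per-TID flushes, and a
CMD_UNBLOCK_CACHE follows it:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in types so the sketch compiles on its own; the real
 * definitions live in the hal/dp headers.
 */
enum reo_cmd { CMD_FLUSH_CACHE, CMD_UNBLOCK_CACHE };
enum { UNBLOCK_CACHE = 1 };

struct reo_cmd_params {
	struct {
		int need_status;
		uint32_t addr_lo;
		uint32_t addr_hi;
	} std;
	union {
		struct {
			int flush_no_inval;
			int flush_entire_cache;
		} fl_cache_params;
		struct {
			int type;
		} unblk_cache_params;
	} u;
};

/* Stub: the real dp_reo_send_cmd() posts the command to the REO ring. */
static void send_reo_cmd(enum reo_cmd cmd, struct reo_cmd_params *p)
{
	printf("REO cmd %d posted (addr_hi=0x%" PRIx32 " addr_lo=0x%" PRIx32 ")\n",
	       cmd, p->std.addr_hi, p->std.addr_lo);
}

/* One whole-cache flush after the query loop, then an unblock, instead
 * of one flush per TID. qdesc_paddr stands for the queue descriptor
 * address of the last TID that was queried (last_tid in the patch).
 */
static void flush_entire_reo_cache(uint64_t qdesc_paddr)
{
	struct reo_cmd_params params;

	memset(&params, 0, sizeof(params));
	params.std.addr_lo = qdesc_paddr & 0xffffffff;
	params.std.addr_hi = qdesc_paddr >> 32;
	params.u.fl_cache_params.flush_no_inval = 1;
	params.u.fl_cache_params.flush_entire_cache = 1;
	send_reo_cmd(CMD_FLUSH_CACHE, &params);

	memset(&params, 0, sizeof(params));
	params.std.addr_lo = qdesc_paddr & 0xffffffff;
	params.std.addr_hi = qdesc_paddr >> 32;
	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
	send_reo_cmd(CMD_UNBLOCK_CACHE, &params);
}

int main(void)
{
	flush_entire_reo_cache(0x12345678000ULL); /* hypothetical address */
	return 0;
}

The patch pairs the whole-cache flush with CMD_UNBLOCK_CACHE, presumably
because flushing the entire cache leaves the REO cache blocked until it
is explicitly unblocked, which the earlier per-TID flushes did not
require.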