qcacmn: Optimize RX hw stats flow

Only 8 RX data TIDs are used, so the HW stats query only needs to update
the info for these TIDs, saving command execution time.

Change-Id: I04b65d19ddb3a9e43cb96b266708272769f7dcf2
CRs-Fixed: 3348771
This commit is contained in:
Yu Tian
2022-11-29 22:41:13 -08:00
Committed by Madan Koyyalamudi
Parent e5534b1953
Current commit 6d7d7fcc42

View file

@@ -5868,7 +5868,6 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
uint16_t peer_id = peer->peer_id;
unsigned long comb_peer_id_tid;
struct dp_rx_tid *rx_tid;
int last_tid = 0;
if (!dp_stats_cmd_cb)
return stats_cmd_sent_cnt;
@@ -5891,38 +5890,24 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
&params, dp_stats_cmd_cb,
(void *)comb_peer_id_tid);
if (QDF_IS_STATUS_SUCCESS(status)) {
if (QDF_IS_STATUS_SUCCESS(status))
stats_cmd_sent_cnt++;
last_tid = i;
}
/* Flush REO descriptor from HW cache to update stats
* in descriptor memory. This is to help debugging
*/
qdf_mem_zero(&params, sizeof(params));
params.std.need_status = 0;
params.std.addr_lo =
rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi =
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.fl_cache_params.flush_no_inval = 1;
dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
NULL);
}
}
/* Flush REO descriptor from HW cache to update stats
* in descriptor memory. This is to help debugging
*/
rx_tid = &peer->rx_tid[last_tid];
qdf_mem_zero(&params, sizeof(params));
params.std.need_status = 0;
params.std.addr_lo =
rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi =
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.fl_cache_params.flush_no_inval = 1;
params.u.fl_cache_params.flush_entire_cache = 1;
dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
NULL);
qdf_mem_zero(&params, sizeof(params));
params.std.need_status = 0;
params.std.addr_lo =
rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi =
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.unblk_cache_params.type = UNBLOCK_CACHE;
dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
NULL);
return stats_cmd_sent_cnt;
}
@@ -5947,7 +5932,6 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
int stats_cmd_sent_cnt = 0;
QDF_STATUS status;
struct dp_rx_tid *rx_tid;
int last_tid = 0;
if (!dp_stats_cmd_cb)
return stats_cmd_sent_cnt;
@@ -5977,38 +5961,25 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
rx_tid);
}
if (QDF_IS_STATUS_SUCCESS(status)) {
if (QDF_IS_STATUS_SUCCESS(status))
stats_cmd_sent_cnt++;
last_tid = i;
}
/* Flush REO descriptor from HW cache to update stats
* in descriptor memory. This is to help debugging
*/
qdf_mem_zero(&params, sizeof(params));
params.std.need_status = 0;
params.std.addr_lo =
rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi =
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.fl_cache_params.flush_no_inval = 1;
dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
NULL);
}
}
/* Flush REO descriptor from HW cache to update stats
* in descriptor memory. This is to help debugging
*/
rx_tid = &peer->rx_tid[last_tid];
qdf_mem_zero(&params, sizeof(params));
params.std.need_status = 0;
params.std.addr_lo =
rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi =
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.fl_cache_params.flush_no_inval = 1;
params.u.fl_cache_params.flush_entire_cache = 1;
dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
NULL);
qdf_mem_zero(&params, sizeof(params));
params.std.need_status = 0;
params.std.addr_lo =
rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi =
(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.unblk_cache_params.type = UNBLOCK_CACHE;
dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
NULL);
return stats_cmd_sent_cnt;
}