qcacmn: Optimize RX hw stats flow
Only 8 RX data TIDs are in use, so the HW stats query needs to update
only those TIDs, which shortens command execution time. Reduce the
number of TIDs iterated over and issue a single REO flush command for
the overall REO cache flush instead of one flush per TID.

Change-Id: Ib642451b9f07f62198126c45644742e1908280cd
CRs-Fixed: 3325964
committed by Madan Koyyalamudi
parent 5d0b7ea33f
commit 5a6d2c2374
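For orientation before the hunks: the reworked per-peer query loop, condensed from the diff below into one fragment (a sketch, not a verbatim excerpt — declarations, callback plumbing, and the trailing CMD_UNBLOCK_CACHE step are trimmed; all identifiers are taken from the diff itself):

for (i = 0; i < DP_MAX_TIDS; i++) {
	/* Only the 8 QoS data TIDs plus the non-QoS data TID carry RX
	 * data traffic, so skip every other TID.
	 */
	if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
		continue;

	rx_tid = &peer->rx_tid[i];
	if (!rx_tid->hw_qdesc_vaddr_unaligned)
		continue;

	/* one GET_QUEUE_STATS command per remaining TID */
	status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, &params,
				 dp_stats_cmd_cb, rx_tid);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		stats_cmd_sent_cnt++;
		last_tid = i;	/* remember one valid queue for the flush */
	}
}

/* A single entire-cache flush replaces the old per-TID flush commands. */
rx_tid = &peer->rx_tid[last_tid];
qdf_mem_zero(&params, sizeof(params));
params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
params.u.fl_cache_params.flush_no_inval = 1;
params.u.fl_cache_params.flush_entire_cache = 1;
dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL, NULL);

With 8 data TIDs plus the non-QoS TID, at most 9 GET_QUEUE_STATS commands go out per peer instead of DP_MAX_TIDS, and the REO cache is flushed once rather than once per TID; that headroom is what lets DP_REO_STATUS_STATS_TIMEOUT drop from 1500 ms to 850 ms.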
@@ -14549,7 +14549,7 @@ dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
 #ifdef WLAN_FEATURE_STATS_EXT
 /* rx hw stats event wait timeout in ms */
-#define DP_REO_STATUS_STATS_TIMEOUT 1500
+#define DP_REO_STATUS_STATS_TIMEOUT 850
 /**
  * dp_txrx_ext_stats_request - request dp txrx extended stats request
  * @soc_hdl: soc handle
@@ -14690,6 +14690,7 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
 	soc->ext_stats.rx_mpdu_received = 0;
 
+	dp_debug("HW stats query start");
 	rx_stats_sent_cnt =
 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
 	if (!rx_stats_sent_cnt) {
@@ -14707,10 +14708,13 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 
 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
 				       DP_REO_STATUS_STATS_TIMEOUT);
+	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
 
 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
 	if (status != QDF_STATUS_SUCCESS) {
-		dp_info("rx hw stats event timeout");
+		dp_info("partial rx hw stats event collected with %d",
+			qdf_atomic_read(
+				&rx_hw_stats->pending_tid_stats_cnt));
 		if (soc->is_last_stats_ctx_init)
 			rx_hw_stats->is_query_timeout = true;
 		/**
@@ -14719,6 +14723,8 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 		 */
 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
+		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
+
 	}
 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
 
@@ -14727,6 +14733,7 @@ out:
 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 	if (vdev)
 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
+	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
 
 	return status;
 }
@@ -5867,14 +5867,18 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
 	QDF_STATUS status;
 	uint16_t peer_id = peer->peer_id;
 	unsigned long comb_peer_id_tid;
+	struct dp_rx_tid *rx_tid;
+	int last_tid = 0;
 
 	if (!dp_stats_cmd_cb)
 		return stats_cmd_sent_cnt;
 
 	qdf_mem_zero(&params, sizeof(params));
 	for (i = 0; i < DP_MAX_TIDS; i++) {
-		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
+			continue;
+
+		rx_tid = &peer->rx_tid[i];
 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
 			params.std.need_status = 1;
 			params.std.addr_lo =
@@ -5887,24 +5891,38 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
 						 &params, dp_stats_cmd_cb,
 						 (void *)comb_peer_id_tid);
-			if (QDF_IS_STATUS_SUCCESS(status))
+			if (QDF_IS_STATUS_SUCCESS(status)) {
 				stats_cmd_sent_cnt++;
-
-			/* Flush REO descriptor from HW cache to update stats
-			 * in descriptor memory. This is to help debugging
-			 */
-			qdf_mem_zero(&params, sizeof(params));
-			params.std.need_status = 0;
-			params.std.addr_lo =
-				rx_tid->hw_qdesc_paddr & 0xffffffff;
-			params.std.addr_hi =
-				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
-			params.u.fl_cache_params.flush_no_inval = 1;
-			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
-					NULL);
+				last_tid = i;
+			}
 		}
 	}
 
+	/* Flush REO descriptor from HW cache to update stats
+	 * in descriptor memory. This is to help debugging
+	 */
+	rx_tid = &peer->rx_tid[last_tid];
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.fl_cache_params.flush_no_inval = 1;
+	params.u.fl_cache_params.flush_entire_cache = 1;
+	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
+			NULL);
+
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
+	dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
+			NULL);
+
 	return stats_cmd_sent_cnt;
 }
 
@@ -5928,13 +5946,18 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
 	int i;
 	int stats_cmd_sent_cnt = 0;
 	QDF_STATUS status;
+	struct dp_rx_tid *rx_tid;
+	int last_tid = 0;
 
 	if (!dp_stats_cmd_cb)
 		return stats_cmd_sent_cnt;
 
 	qdf_mem_zero(&params, sizeof(params));
 	for (i = 0; i < DP_MAX_TIDS; i++) {
-		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
+			continue;
+
+		rx_tid = &peer->rx_tid[i];
 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
 			params.std.need_status = 1;
 			params.std.addr_lo =
@@ -5954,23 +5977,38 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
 						  rx_tid);
 			}
 
-			if (QDF_IS_STATUS_SUCCESS(status))
+			if (QDF_IS_STATUS_SUCCESS(status)) {
 				stats_cmd_sent_cnt++;
-
-			/* Flush REO descriptor from HW cache to update stats
-			 * in descriptor memory. This is to help debugging */
-			qdf_mem_zero(&params, sizeof(params));
-			params.std.need_status = 0;
-			params.std.addr_lo =
-				rx_tid->hw_qdesc_paddr & 0xffffffff;
-			params.std.addr_hi =
-				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
-			params.u.fl_cache_params.flush_no_inval = 1;
-			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
-					NULL);
+				last_tid = i;
+			}
 		}
 	}
 
+	/* Flush REO descriptor from HW cache to update stats
+	 * in descriptor memory. This is to help debugging
+	 */
+	rx_tid = &peer->rx_tid[last_tid];
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.fl_cache_params.flush_no_inval = 1;
+	params.u.fl_cache_params.flush_entire_cache = 1;
+	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
+			NULL);
+
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
+	dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
+			NULL);
+
 	return stats_cmd_sent_cnt;
 }
 
@@ -8004,6 +8004,10 @@ dp_print_soc_rx_stats(struct dp_soc *soc)
 	DP_PRINT_STATS("Reo2rel route drop:%d",
 		       soc->stats.rx.reo2rel_route_drop);
 	DP_PRINT_STATS("Rx Flush count:%d", soc->stats.rx.err.rx_flush_count);
+	DP_PRINT_STATS("RX HW stats request count:%d",
+		       soc->stats.rx.rx_hw_stats_requested);
+	DP_PRINT_STATS("RX HW stats request timeout:%d",
+		       soc->stats.rx.rx_hw_stats_timeout);
 	DP_PRINT_STATS("Rx invalid TID count:%d",
 		       soc->stats.rx.err.rx_invalid_tid_err);
 	DP_PRINT_STATS("Rx Defrag Address1 Invalid:%d",
@@ -1151,6 +1151,10 @@ struct dp_soc_stats {
 		/* Number of frames routed from reo*/
 		uint32_t reo2rel_route_drop;
 		uint64_t fast_recycled;
+		/* Number of hw stats requested */
+		uint32_t rx_hw_stats_requested;
+		/* Number of hw stats request timeout */
+		uint32_t rx_hw_stats_timeout;
 
 		struct {
 			/* Invalid RBM error count */