ソースを参照

qcacmn: Optimize RX hw stats flow

Only 8 RX data TIDs are used, so the HW stats query only needs to update
these TIDs' info, saving command execution time. This change is aimed at
reducing the number of TIDs queried and using only one REO flush command
for the overall REO cache flush.

Change-Id: Ib642451b9f07f62198126c45644742e1908280cd
CRs-Fixed: 3325964
Yu Tian 2 年 前
コミット
5a6d2c2374
4 ファイル変更84 行追加31 行削除
  1. 9 2
      dp/wifi3.0/dp_main.c
  2. 67 29
      dp/wifi3.0/dp_peer.c
  3. 4 0
      dp/wifi3.0/dp_stats.c
  4. 4 0
      dp/wifi3.0/dp_types.h

+ 9 - 2
dp/wifi3.0/dp_main.c

@@ -14549,7 +14549,7 @@ dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
 
 #ifdef WLAN_FEATURE_STATS_EXT
 /* rx hw stats event wait timeout in ms */
-#define DP_REO_STATUS_STATS_TIMEOUT 1500
+#define DP_REO_STATUS_STATS_TIMEOUT 850
 /**
  * dp_txrx_ext_stats_request - request dp txrx extended stats request
  * @soc_hdl: soc handle
@@ -14690,6 +14690,7 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
 	soc->ext_stats.rx_mpdu_received = 0;
 
+	dp_debug("HW stats query start");
 	rx_stats_sent_cnt =
 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
 	if (!rx_stats_sent_cnt) {
@@ -14707,10 +14708,13 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 
 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
 				       DP_REO_STATUS_STATS_TIMEOUT);
+	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
 
 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
 	if (status != QDF_STATUS_SUCCESS) {
-		dp_info("rx hw stats event timeout");
+		dp_info("partial rx hw stats event collected with %d",
+			qdf_atomic_read(
+				&rx_hw_stats->pending_tid_stats_cnt));
 		if (soc->is_last_stats_ctx_init)
 			rx_hw_stats->is_query_timeout = true;
 		/**
@@ -14719,6 +14723,8 @@ dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 		 */
 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
+		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
+
 	}
 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
 
@@ -14727,6 +14733,7 @@ out:
 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 	if (vdev)
 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
+	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
 
 	return status;
 }

+ 67 - 29
dp/wifi3.0/dp_peer.c

@@ -5867,14 +5867,18 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
 	QDF_STATUS status;
 	uint16_t peer_id = peer->peer_id;
 	unsigned long comb_peer_id_tid;
+	struct dp_rx_tid *rx_tid;
+	int last_tid = 0;
 
 	if (!dp_stats_cmd_cb)
 		return stats_cmd_sent_cnt;
 
 	qdf_mem_zero(&params, sizeof(params));
 	for (i = 0; i < DP_MAX_TIDS; i++) {
-		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
+			continue;
 
+		rx_tid = &peer->rx_tid[i];
 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
 			params.std.need_status = 1;
 			params.std.addr_lo =
@@ -5887,24 +5891,38 @@ int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
 						 &params, dp_stats_cmd_cb,
 						 (void *)comb_peer_id_tid);
-			if (QDF_IS_STATUS_SUCCESS(status))
+			if (QDF_IS_STATUS_SUCCESS(status)) {
 				stats_cmd_sent_cnt++;
-
-			/* Flush REO descriptor from HW cache to update stats
-			 * in descriptor memory. This is to help debugging
-			 */
-			qdf_mem_zero(&params, sizeof(params));
-			params.std.need_status = 0;
-			params.std.addr_lo =
-				rx_tid->hw_qdesc_paddr & 0xffffffff;
-			params.std.addr_hi =
-				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
-			params.u.fl_cache_params.flush_no_inval = 1;
-			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
-					NULL);
+				last_tid = i;
+			}
 		}
 	}
 
+	/* Flush REO descriptor from HW cache to update stats
+	 * in descriptor memory. This is to help debugging
+	 */
+	rx_tid = &peer->rx_tid[last_tid];
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.fl_cache_params.flush_no_inval = 1;
+	params.u.fl_cache_params.flush_entire_cache = 1;
+	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
+			NULL);
+
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
+	dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
+			NULL);
+
 	return stats_cmd_sent_cnt;
 }
 
@@ -5928,13 +5946,18 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
 	int i;
 	int stats_cmd_sent_cnt = 0;
 	QDF_STATUS status;
+	struct dp_rx_tid *rx_tid;
+	int last_tid = 0;
 
 	if (!dp_stats_cmd_cb)
 		return stats_cmd_sent_cnt;
 
 	qdf_mem_zero(&params, sizeof(params));
 	for (i = 0; i < DP_MAX_TIDS; i++) {
-		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
+			continue;
+
+		rx_tid = &peer->rx_tid[i];
 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
 			params.std.need_status = 1;
 			params.std.addr_lo =
@@ -5954,23 +5977,38 @@ int dp_peer_rxtid_stats(struct dp_peer *peer,
 						rx_tid);
 			}
 
-			if (QDF_IS_STATUS_SUCCESS(status))
+			if (QDF_IS_STATUS_SUCCESS(status)) {
 				stats_cmd_sent_cnt++;
-
-			/* Flush REO descriptor from HW cache to update stats
-			 * in descriptor memory. This is to help debugging */
-			qdf_mem_zero(&params, sizeof(params));
-			params.std.need_status = 0;
-			params.std.addr_lo =
-				rx_tid->hw_qdesc_paddr & 0xffffffff;
-			params.std.addr_hi =
-				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
-			params.u.fl_cache_params.flush_no_inval = 1;
-			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
-				NULL);
+				last_tid = i;
+			}
 		}
 	}
 
+	/* Flush REO descriptor from HW cache to update stats
+	 * in descriptor memory. This is to help debugging
+	 */
+	rx_tid = &peer->rx_tid[last_tid];
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.fl_cache_params.flush_no_inval = 1;
+	params.u.fl_cache_params.flush_entire_cache = 1;
+	dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
+			NULL);
+
+	qdf_mem_zero(&params, sizeof(params));
+	params.std.need_status = 0;
+	params.std.addr_lo =
+		rx_tid->hw_qdesc_paddr & 0xffffffff;
+	params.std.addr_hi =
+		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+	params.u.unblk_cache_params.type = UNBLOCK_CACHE;
+	dp_reo_send_cmd(soc, CMD_UNBLOCK_CACHE, &params, NULL,
+			NULL);
+
 	return stats_cmd_sent_cnt;
 }
 

+ 4 - 0
dp/wifi3.0/dp_stats.c

@@ -8004,6 +8004,10 @@ dp_print_soc_rx_stats(struct dp_soc *soc)
 	DP_PRINT_STATS("Reo2rel route drop:%d",
 		       soc->stats.rx.reo2rel_route_drop);
 	DP_PRINT_STATS("Rx Flush count:%d", soc->stats.rx.err.rx_flush_count);
+	DP_PRINT_STATS("RX HW stats request count:%d",
+		       soc->stats.rx.rx_hw_stats_requested);
+	DP_PRINT_STATS("RX HW stats request timeout:%d",
+		       soc->stats.rx.rx_hw_stats_timeout);
 	DP_PRINT_STATS("Rx invalid TID count:%d",
 		       soc->stats.rx.err.rx_invalid_tid_err);
 	DP_PRINT_STATS("Rx Defrag Address1 Invalid:%d",

+ 4 - 0
dp/wifi3.0/dp_types.h

@@ -1151,6 +1151,10 @@ struct dp_soc_stats {
 		/* Number of frames routed from reo*/
 		uint32_t reo2rel_route_drop;
 		uint64_t fast_recycled;
+		/* Number of hw stats requested */
+		uint32_t rx_hw_stats_requested;
+		/* Number of hw stats request timeout */
+		uint32_t rx_hw_stats_timeout;
 
 		struct {
 			/* Invalid RBM error count */