Browse code

qcacmn: add rx nbuf ipa smmu map/unmap trace

Currently the in_use and unmapped flags from rx_desc cannot accurately
indicate whether the rx nbuf has completed an IPA SMMU map/unmap.
Add an rx nbuf IPA SMMU map/unmap flag in qdf_nbuf_cb, and use
this flag to check whether an IPA SMMU map/unmap needs to be done.

Change-Id: I19bbc6cc69ba752ef7c58d41fbe26299ca96d29e
CRs-Fixed: 2731313
Jinwei Chen, 5 years ago
parent
commit
d552bfa2cb

+ 49 - 2
dp/wifi3.0/dp_ipa.c

@@ -128,8 +128,29 @@ QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
 	    !qdf_mem_smmu_s1_enabled(soc->osdev))
 		return QDF_STATUS_SUCCESS;
 
-	if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
-		return QDF_STATUS_SUCCESS;
+	/*
+	 * Even if IPA pipes are disabled, if this is an unmap
+	 * operation and the nbuf has done an IPA SMMU map before,
+	 * still do the IPA SMMU unmap.
+	 */
+	if (!qdf_atomic_read(&soc->ipa_pipes_enabled)) {
+		if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
+			DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
+		} else {
+			return QDF_STATUS_SUCCESS;
+		}
+	}
+
+	if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+		if (create) {
+			DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
+		} else {
+			DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
+		}
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
 
 	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
 }
@@ -167,6 +188,19 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 			continue;
 		nbuf = rx_desc->nbuf;
 
+		if (qdf_unlikely(create ==
+				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+			if (create) {
+				DP_STATS_INC(soc,
+					     rx.err.ipa_smmu_map_dup, 1);
+			} else {
+				DP_STATS_INC(soc,
+					     rx.err.ipa_smmu_unmap_dup, 1);
+			}
+			continue;
+		}
+		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
+
 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
 						 rx_pool->buf_size, create);
 	}
@@ -198,6 +232,19 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 
 		nbuf = rx_pool->array[i].rx_desc.nbuf;
 
+		if (qdf_unlikely(create ==
+				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
+			if (create) {
+				DP_STATS_INC(soc,
+					     rx.err.ipa_smmu_map_dup, 1);
+			} else {
+				DP_STATS_INC(soc,
+					     rx.err.ipa_smmu_unmap_dup, 1);
+			}
+			continue;
+		}
+		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
+
 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
 						 rx_pool->buf_size, create);
 	}

+ 6 - 0
dp/wifi3.0/dp_stats.c

@@ -5959,6 +5959,12 @@ void dp_txrx_path_stats(struct dp_soc *soc)
 			       pdev->soc->stats.rx.err.nbuf_sanity_fail);
 		DP_PRINT_STATS("Rx refill duplicate link desc: %d",
 			       pdev->soc->stats.rx.err.dup_refill_link_desc);
+		DP_PRINT_STATS("Rx ipa smmu map duplicate: %d",
+			       pdev->soc->stats.rx.err.ipa_smmu_map_dup);
+		DP_PRINT_STATS("Rx ipa smmu unmap duplicate: %d",
+			       pdev->soc->stats.rx.err.ipa_smmu_unmap_dup);
+		DP_PRINT_STATS("Rx ipa smmu unmap no pipes: %d",
+			       pdev->soc->stats.rx.err.ipa_unmap_no_pipe);
 
 		DP_PRINT_STATS("Reo Statistics");
 		DP_PRINT_STATS("near_full: %u ", soc->stats.rx.near_full);

+ 6 - 0
dp/wifi3.0/dp_types.h

@@ -830,6 +830,12 @@ struct dp_soc_stats {
 			uint32_t hal_wbm_rel_dup;
 			/* HAL RXDMA error Duplicate count */
 			uint32_t hal_rxdma_err_dup;
+			/* ipa smmu map duplicate count */
+			uint32_t ipa_smmu_map_dup;
+			/* ipa smmu unmap duplicate count */
+			uint32_t ipa_smmu_unmap_dup;
+			/* ipa smmu unmap while ipa pipes are disabled */
+			uint32_t ipa_unmap_no_pipe;
 			/* REO cmd send fail/requeue count */
 			uint32_t reo_cmd_send_fail;
 			/* REO cmd send drain count */

+ 24 - 0
qdf/inc/i_qdf_nbuf_api_m.h

@@ -119,4 +119,28 @@ static inline uint8_t qdf_nbuf_is_exc_frame(qdf_nbuf_t buf)
 {
 	return QDF_NBUF_CB_RX_PACKET_EXC_FRAME(buf);
 }
+
+/**
+ * qdf_nbuf_set_rx_ipa_smmu_map() - set ipa smmu mapped flag
+ * @buf: Network buffer
+ * @value: 1 - ipa smmu mapped, 0 - ipa smmu unmapped
+ *
+ * Return: none
+ */
+static inline void qdf_nbuf_set_rx_ipa_smmu_map(qdf_nbuf_t buf,
+						uint8_t value)
+{
+	QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(buf) = value;
+}
+
+/**
+ * qdf_nbuf_is_rx_ipa_smmu_map() - check ipa smmu map flag
+ * @buf: Network buffer
+ *
+ * Return: 0 or 1
+ */
+static inline uint8_t qdf_nbuf_is_rx_ipa_smmu_map(qdf_nbuf_t buf)
+{
+	return QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(buf);
+}
 #endif /* _QDF_NBUF_M_H */

+ 23 - 0
qdf/inc/i_qdf_nbuf_api_w.h

@@ -120,4 +120,27 @@ static inline uint16_t qdf_nbuf_get_rx_flow_tag(qdf_nbuf_t buf)
 static inline void qdf_nbuf_set_exc_frame(qdf_nbuf_t buf, uint8_t value)
 {
 }
+
+/**
+ * qdf_nbuf_set_rx_ipa_smmu_map() - set ipa smmu mapped flag
+ * @buf: Network buffer
+ * @value: 1 - ipa smmu mapped, 0 - ipa smmu unmapped
+ *
+ * Return: none
+ */
+static inline void qdf_nbuf_set_rx_ipa_smmu_map(qdf_nbuf_t buf,
+						uint8_t value)
+{
+}
+
+/**
+ * qdf_nbuf_is_rx_ipa_smmu_map() - check ipa smmu map flag
+ * @buf: Network buffer
+ *
+ * Return: 0 or 1
+ */
+static inline uint8_t qdf_nbuf_is_rx_ipa_smmu_map(qdf_nbuf_t buf)
+{
+	return 0;
+}
 #endif /* _QDF_NBUF_W_H */

+ 3 - 1
qdf/linux/src/i_qdf_nbuf.h

@@ -112,6 +112,7 @@ typedef union {
  * @rx.dev.priv_cb_m.packet_buf_pool:  packet buff bool
  * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
  * @rx.dev.priv_cb_m.exc_frm: exception frame
+ * @rx.dev.priv_cb_m.ipa_smmu_map: nbuf is IPA SMMU mapped
  * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
  * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
  * @rx.dev.priv_cb_m.lro_ctx: LRO context
@@ -230,7 +231,8 @@ struct qdf_nbuf_cb {
 						 l3_hdr_pad:3,
 						 /* exception frame flag */
 						 exc_frm:1,
-						 reserved:8,
+						 ipa_smmu_map:1,
+						 reserved:7,
 						 reserved1:16;
 					uint32_t tcp_seq_num;
 					uint32_t tcp_ack_num;

+ 4 - 0
qdf/linux/src/i_qdf_nbuf_m.h

@@ -77,6 +77,10 @@
 	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
 	 exc_frm)
 
+#define  QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
+	 (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m. \
+	 ipa_smmu_map)
+
 #define __qdf_nbuf_ipa_owned_get(skb) \
 	QDF_NBUF_CB_TX_IPA_OWNED(skb)