qcacmn: Fix race condition during IPA map/unmap handling

While Rx buffers are being unmapped from the net rx context, if IPA
pipes are enabled at the same time from the MC thread context, the two
paths race and IPA map/unmap goes out of sync.

To fix this, introduce an IPA mapping lock and perform all IPA
map/unmap handling with the lock held.

Change-Id: I9fa71bdb6d4e4aa93fc795cc5dd472a181325991
CRs-Fixed: 2945063
Karthik Kantamneni, 4 years ago
Commit cfbfcf3b21
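
For orientation, here is a minimal stand-alone model of the locking scheme this change introduces. It is illustrative only: pthread mutexes stand in for qdf spinlocks, MAX_REO_DEST_RINGS is assumed to be 4, and all names are hypothetical, but the protocol matches the hunks below.

    /* Stand-alone sketch of the protocol; compile with -pthread. */
    #include <pthread.h>
    #include <stdbool.h>

    #define MAX_REO_DEST_RINGS 4

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ ipa_rx_buf_map_lock */
    static pthread_mutex_t ring_lock[MAX_REO_DEST_RINGS] = {     /* ~ per-SRNG locks */
    	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };
    static bool lock_required[MAX_REO_DEST_RINGS];               /* ~ ipa_reo_ctx_lock_required */

    /* MC thread: tell rx contexts to start taking map_lock, run the bulk
     * IPA map/unmap under map_lock, then clear the flags again. Each flag
     * flip happens under that ring's lock, so a reap pass already inside
     * its ring lock sees a stable value for its whole pass. */
    static void bulk_smmu_map_unmap(bool create)
    {
    	int i;

    	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
    		pthread_mutex_lock(&ring_lock[i]);
    		lock_required[i] = true;
    		pthread_mutex_unlock(&ring_lock[i]);
    	}

    	pthread_mutex_lock(&map_lock);
    	/* ... walk the rx descriptor pool, map or unmap every buffer ... */
    	(void)create;
    	pthread_mutex_unlock(&map_lock);

    	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
    		pthread_mutex_lock(&ring_lock[i]);
    		lock_required[i] = false;
    		pthread_mutex_unlock(&ring_lock[i]);
    	}
    }

    /* net rx context: caller must hold ring_lock[ring] while reaping, so
     * lock_required[ring] cannot change between the two checks; map_lock
     * is paid for only while a bulk pass may be in flight. */
    static void rx_unmap_one(int ring)
    {
    	if (lock_required[ring])
    		pthread_mutex_lock(&map_lock);
    	/* ... SMMU-unmap this rx buffer and mark it unmapped ... */
    	if (lock_required[ring])
    		pthread_mutex_unlock(&map_lock);
    }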

+ 41 - 0
dp/wifi3.0/dp_ipa.c

@@ -202,6 +202,28 @@ static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
 	return ret;
 }
 
+#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
+static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
+						     bool lock_required)
+{
+	hal_ring_handle_t hal_ring_hdl;
+	int ring;
+
+	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
+		hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng;
+		hal_srng_lock(hal_ring_hdl);
+		soc->ipa_reo_ctx_lock_required[ring] = lock_required;
+		hal_srng_unlock(hal_ring_hdl);
+	}
+}
+#else
+static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
+						     bool lock_required)
+{
+}
+
+#endif
+
 #ifdef RX_DESC_MULTI_PAGE_ALLOC
 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 							 struct dp_pdev *pdev,
@@ -225,7 +247,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 	pdev_id = pdev->pdev_id;
 	rx_pool = &soc->rx_desc_buf[pdev_id];
 
+	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
 	qdf_spin_lock_bh(&rx_pool->lock);
+	dp_ipa_rx_buf_smmu_mapping_lock(soc);
 	num_desc = rx_pool->pool_size;
 	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
 	for (i = 0; i < num_desc; i++) {
@@ -255,7 +279,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 		ret = __dp_ipa_handle_buf_smmu_mapping(
 				soc, nbuf, rx_pool->buf_size, create);
 	}
+	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 	qdf_spin_unlock_bh(&rx_pool->lock);
+	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);
 
 	return ret;
 }
@@ -278,7 +304,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 	pdev_id = pdev->pdev_id;
 	rx_pool = &soc->rx_desc_buf[pdev_id];
 
+	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
 	qdf_spin_lock_bh(&rx_pool->lock);
+	dp_ipa_rx_buf_smmu_mapping_lock(soc);
 	for (i = 0; i < rx_pool->pool_size; i++) {
 		if ((!(rx_pool->array[i].rx_desc.in_use)) ||
 		    rx_pool->array[i].rx_desc.unmapped)
@@ -302,7 +330,9 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
 						 rx_pool->buf_size, create);
 	}
+	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 	qdf_spin_unlock_bh(&rx_pool->lock);
+	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -2107,6 +2137,9 @@ QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 	soc->ipa_first_tx_db_access = true;
 	qdf_mem_free(pipe_in);
 
+	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
+	soc->ipa_rx_buf_map_lock_initialized = true;
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -2323,6 +2356,9 @@ QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 
 	soc->ipa_first_tx_db_access = true;
 
+	qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
+	soc->ipa_rx_buf_map_lock_initialized = true;
+
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 		  "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
 		  __func__,
@@ -2458,6 +2494,11 @@ QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 		status = QDF_STATUS_E_FAILURE;
 	}
 
+	if (soc->ipa_rx_buf_map_lock_initialized) {
+		qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock);
+		soc->ipa_rx_buf_map_lock_initialized = false;
+	}
+
 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
 	if (qdf_unlikely(!pdev)) {
 		dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);

+ 81 - 0
dp/wifi3.0/dp_ipa.h

@@ -294,6 +294,65 @@ QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
 	struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
 
+#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
+static inline void
+dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
+{
+	if (soc->ipa_rx_buf_map_lock_initialized)
+		qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
+}
+
+static inline void
+dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
+{
+	if (soc->ipa_rx_buf_map_lock_initialized)
+		qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
+}
+
+static inline void
+dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
+				uint32_t reo_ring_num)
+{
+	if (!soc->ipa_reo_ctx_lock_required[reo_ring_num])
+		return;
+
+	qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
+}
+
+static inline void
+dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
+				  uint32_t reo_ring_num)
+{
+	if (!soc->ipa_reo_ctx_lock_required[reo_ring_num])
+		return;
+
+	qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
+}
+#else
+
+static inline void
+dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
+{
+}
+
+static inline void
+dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
+{
+}
+
+static inline void
+dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
+				uint32_t reo_ring_num)
+{
+}
+
+static inline void
+dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
+				  uint32_t reo_ring_num)
+{
+}
+#endif
+
 #else
 static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
 {
@@ -319,6 +378,28 @@ static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
 	return QDF_STATUS_SUCCESS;
 }
 
+static inline void
+dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
+{
+}
+
+static inline void
+dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
+{
+}
+
+static inline void
+dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
+				uint32_t reo_ring_num)
+{
+}
+
+static inline void
+dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
+				  uint32_t reo_ring_num)
+{
+}
+
 static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
 						       qdf_nbuf_t nbuf)
 {
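
A hypothetical caller sketch for the two lock flavors added above, mirroring the dp_rx.c and dp_rx_err.c hunks further down:

    /* Fast path: runs with the REO destination ring's SRNG lock held, so
     * ipa_reo_ctx_lock_required[reo_ring_num] is stable across the pair
     * and the spinlock is taken only while a bulk IPA pass may run. */
    dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
    /* ... SMMU-unmap one rx buffer, set rx_desc->unmapped = 1 ... */
    dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);

    /* Error/defrag paths: no REO destination ring context to key off,
     * so they take the lock unconditionally (guarded by the
     * initialized flag). */
    dp_ipa_rx_buf_smmu_mapping_lock(soc);
    /* ... SMMU-unmap one rx buffer, set rx_desc->unmapped = 1 ... */
    dp_ipa_rx_buf_smmu_mapping_unlock(soc);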

+ 9 - 0
dp/wifi3.0/dp_rx.c

@@ -2529,6 +2529,9 @@ more_data:
 		if (QDF_IS_STATUS_ERROR(status)) {
 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
 				qdf_assert_always(rx_desc->unmapped);
+				dp_ipa_reo_ctx_buf_mapping_lock(
+							soc,
+							reo_ring_num);
 				dp_ipa_handle_rx_buf_smmu_mapping(
 							soc,
 							rx_desc->nbuf,
@@ -2540,6 +2543,9 @@ more_data:
 							QDF_DMA_FROM_DEVICE,
 							RX_DATA_BUFFER_SIZE);
 				rx_desc->unmapped = 1;
+				dp_ipa_reo_ctx_buf_mapping_unlock(
+								soc,
+								reo_ring_num);
 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
 							    rx_desc->pool_id);
 				dp_rx_add_to_free_desc_list(
@@ -2695,6 +2701,8 @@ more_data:
 		 * in case double skb unmap happened.
 		 */
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+		dp_ipa_reo_ctx_buf_mapping_lock(soc,
+						reo_ring_num);
 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
 						  rx_desc_pool->buf_size,
 						  false);
@@ -2702,6 +2710,7 @@ more_data:
 					     QDF_DMA_FROM_DEVICE,
 					     rx_desc_pool->buf_size);
 		rx_desc->unmapped = 1;
+		dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
 				   ebuf_tail, rx_desc);
 		/*
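
Worth spelling out why the lock spans all three steps in the hunks above (a reading of the change, not part of it): the bulk pass skips any descriptor whose unmapped flag is already set, so the IPA SMMU unmap, the CPU unmap, and the flag store must look atomic to it:

    dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
    dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size, false); /* IPA SMMU unmap */
    qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
    			     QDF_DMA_FROM_DEVICE, buf_size);      /* CPU unmap */
    rx_desc->unmapped = 1;       /* bulk pass skips this descriptor from now on */
    dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);

If the flag store escaped the critical section, a concurrent bulk unmap could slip in between and unmap the same buffer twice, which is exactly the double-unmap the dp_rx.c comment above guards against.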

+ 2 - 0
dp/wifi3.0/dp_rx_defrag.c

@@ -1971,6 +1971,7 @@ uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
 	if (rx_desc->unmapped)
 		return rx_bufs_used;
 
+	dp_ipa_rx_buf_smmu_mapping_lock(soc);
 	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
 					  rx_desc_pool->buf_size,
 					  false);
@@ -1978,6 +1979,7 @@ uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
 				     QDF_DMA_FROM_DEVICE,
 				     rx_desc_pool->buf_size);
 	rx_desc->unmapped = 1;
+	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);
 

+ 12 - 0
dp/wifi3.0/dp_rx_err.c

@@ -321,6 +321,7 @@ more_msdu_link_desc:
 		}
 
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+		dp_ipa_rx_buf_smmu_mapping_lock(soc);
 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
 						  rx_desc_pool->buf_size,
 						  false);
@@ -328,6 +329,7 @@ more_msdu_link_desc:
 					     QDF_DMA_FROM_DEVICE,
 					     rx_desc_pool->buf_size);
 		rx_desc->unmapped = 1;
+		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
 		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
 
@@ -569,6 +571,7 @@ more_msdu_link_desc:
 		}
 
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+		dp_ipa_rx_buf_smmu_mapping_lock(soc);
 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
 						  rx_desc_pool->buf_size,
 						  false);
@@ -576,6 +579,7 @@ more_msdu_link_desc:
 					     QDF_DMA_FROM_DEVICE,
 					     rx_desc_pool->buf_size);
 		rx_desc->unmapped = 1;
+		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
 		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
 		rx_bufs_used++;
@@ -851,6 +855,7 @@ dp_rx_bar_frame_handle(struct dp_soc *soc,
 
 	nbuf = rx_desc->nbuf;
 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+	dp_ipa_rx_buf_smmu_mapping_lock(soc);
 	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
 					  rx_desc_pool->buf_size,
 					  false);
@@ -858,6 +863,7 @@ dp_rx_bar_frame_handle(struct dp_soc *soc,
 				     QDF_DMA_FROM_DEVICE,
 				     rx_desc_pool->buf_size);
 	rx_desc->unmapped = 1;
+	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
 	peer_id =
 		hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
@@ -2072,6 +2078,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
 		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
 		nbuf = rx_desc->nbuf;
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+		dp_ipa_rx_buf_smmu_mapping_lock(soc);
 		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
 						  rx_desc_pool->buf_size,
 						  false);
@@ -2079,6 +2086,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
 					     QDF_DMA_FROM_DEVICE,
 					     rx_desc_pool->buf_size);
 		rx_desc->unmapped = 1;
+		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
 				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
@@ -2517,6 +2525,7 @@ dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
 
 					rx_desc_pool = &soc->
 						rx_desc_buf[rx_desc->pool_id];
+					dp_ipa_rx_buf_smmu_mapping_lock(soc);
 					dp_ipa_handle_rx_buf_smmu_mapping(
 							soc, msdu,
 							rx_desc_pool->buf_size,
@@ -2526,6 +2535,7 @@ dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
 						QDF_DMA_FROM_DEVICE,
 						rx_desc_pool->buf_size);
 					rx_desc->unmapped = 1;
+					dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
 					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
 							soc, msdu);
@@ -2749,6 +2759,7 @@ dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
 
 		if (rx_desc && rx_desc->nbuf) {
 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+			dp_ipa_rx_buf_smmu_mapping_lock(soc);
 			dp_ipa_handle_rx_buf_smmu_mapping(
 						soc, rx_desc->nbuf,
 						rx_desc_pool->buf_size,
@@ -2757,6 +2768,7 @@ dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
 						     QDF_DMA_FROM_DEVICE,
 						     rx_desc_pool->buf_size);
 			rx_desc->unmapped = 1;
+			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
 			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
 						    rx_desc->pool_id);

+ 3 - 0
dp/wifi3.0/dp_types.h

@@ -1798,6 +1798,9 @@ struct dp_soc {
 
 	qdf_atomic_t ipa_pipes_enabled;
 	bool ipa_first_tx_db_access;
+	qdf_spinlock_t ipa_rx_buf_map_lock;
+	bool ipa_rx_buf_map_lock_initialized;
+	uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
 #endif
 
 #ifdef WLAN_FEATURE_STATS_EXT

+ 26 - 0
hal/wifi3.0/hal_api.h

@@ -1440,6 +1440,32 @@ void *hal_srng_dst_get_next_cached(void *hal_soc,
 	return (void *)desc;
 }
 
+static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
+
+	if (qdf_unlikely(!hal_ring_hdl)) {
+		qdf_print("error: invalid hal_ring\n");
+		return -EINVAL;
+	}
+
+	SRNG_LOCK(&(srng->lock));
+	return 0;
+}
+
+static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
+
+	if (qdf_unlikely(!hal_ring_hdl)) {
+		qdf_print("error: invalid hal_ring\n");
+		return -EINVAL;
+	}
+
+	SRNG_UNLOCK(&(srng->lock));
+	return 0;
+}
+
 /**
  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
  * cached head pointer
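
A usage sketch for the new helpers, mirroring dp_ipa_set_reo_ctx_mapping_lock_required() in the dp_ipa.c hunk above; the function name here is hypothetical, and unlike the hunk it checks the error return:

    static void toggle_reo_map_flags(struct dp_soc *soc, bool lock_required)
    {
    	hal_ring_handle_t hal_ring_hdl;
    	int ring;

    	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
    		hal_ring_hdl = soc->reo_dest_ring[ring].hal_srng;
    		if (hal_srng_lock(hal_ring_hdl))
    			continue;	/* invalid handle: nothing to toggle */
    		soc->ipa_reo_ctx_lock_required[ring] = lock_required;
    		hal_srng_unlock(hal_ring_hdl);
    	}
    }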