
qcacmn: introduce simple nbuf_free API in RX path

The simple alloc API is used in the RX path, which skips
certain nbuf debug logic. The free of these nbufs must
skip the same debug logic; otherwise, when debug is
enabled, the free is reported as a false double free.

Change-Id: Iadb40071fb733cc4de3291784df5075d5a099a8e
Tallapragada Kalyan, 3 years ago
Parent commit: b265fdd7fc
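
To make the failure mode described in the commit message concrete, here is a minimal, self-contained toy model. This is not qcacmn code; every name in it (nbuf_t, debug_table, alloc_simple, free_debug, ...) is hypothetical. It shows why a debug-tracking free applied to a buffer that came from an untracked "simple" alloc path produces a spurious double-free report.

/*
 * Hypothetical sketch (not qcacmn code): a debug-tracking free applied
 * to a buffer allocated through an untracked "simple" path finds no
 * allocation record and reports a spurious double free.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACK_MAX 16

typedef struct { int id; } nbuf_t;

static nbuf_t *debug_table[TRACK_MAX];   /* allocations seen by the debug layer */

static nbuf_t *alloc_debug(void)         /* tracked alloc: records the buffer */
{
	nbuf_t *nbuf = malloc(sizeof(*nbuf));

	for (int i = 0; i < TRACK_MAX; i++) {
		if (!debug_table[i]) {
			debug_table[i] = nbuf;
			break;
		}
	}
	return nbuf;
}

static nbuf_t *alloc_simple(void)        /* fast RX alloc: skips tracking */
{
	return malloc(sizeof(nbuf_t));
}

static void free_debug(nbuf_t *nbuf)     /* tracked free: expects a record */
{
	for (int i = 0; i < TRACK_MAX; i++) {
		if (debug_table[i] == nbuf) {
			debug_table[i] = NULL;
			free(nbuf);
			return;
		}
	}
	/* No record: the debug layer assumes the buffer was already freed. */
	printf("false double-free report for %p\n", (void *)nbuf);
	free(nbuf);
}

static void free_simple(nbuf_t *nbuf)    /* untracked free: pairs with alloc_simple */
{
	free(nbuf);
}

int main(void)
{
	nbuf_t *a = alloc_debug();
	free_debug(a);                   /* matched debug pair: no report */

	nbuf_t *b = alloc_simple();
	free_debug(b);                   /* mismatched pair: spurious report */

	nbuf_t *c = alloc_simple();
	free_simple(c);                  /* matched simple pair: what this change does */
	return 0;
}

In the diff below, dp_rx_nbuf_free plays the role of free_simple when the simple RX alloc path is compiled in (it calls the new qdf_nbuf_free_simple, i.e. __qdf_nbuf_free), and falls back to the debug-tracked qdf_nbuf_free otherwise.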

+ 10 - 32
dp/wifi3.0/be/dp_be_rx.c

@@ -260,23 +260,8 @@ more_data:
 		if (QDF_IS_STATUS_ERROR(status)) {
 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
 				qdf_assert_always(!rx_desc->unmapped);
-				dp_ipa_reo_ctx_buf_mapping_lock(
-							soc,
-							reo_ring_num);
-				dp_ipa_handle_rx_buf_smmu_mapping(
-							soc,
-							rx_desc->nbuf,
-							RX_DATA_BUFFER_SIZE,
-							false);
-				qdf_nbuf_unmap_nbytes_single(
-							soc->osdev,
-							rx_desc->nbuf,
-							QDF_DMA_FROM_DEVICE,
-							RX_DATA_BUFFER_SIZE);
+				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
 				rx_desc->unmapped = 1;
-				dp_ipa_reo_ctx_buf_mapping_unlock(
-								soc,
-								reo_ring_num);
 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
 							    rx_desc->pool_id);
 				dp_rx_add_to_free_desc_list(
@@ -436,15 +421,8 @@ more_data:
 		 * in case double skb unmap happened.
 		 */
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
-		dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
-		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
-						  rx_desc_pool->buf_size,
-						  false);
-		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
-					     QDF_DMA_FROM_DEVICE,
-					     rx_desc_pool->buf_size);
+		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
 		rx_desc->unmapped = 1;
-		dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
 				   ebuf_tail, rx_desc);
 		/*
@@ -559,7 +537,7 @@ done:
 		}
 
 		if (qdf_unlikely(!vdev)) {
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			nbuf = next;
 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
 			continue;
@@ -596,7 +574,7 @@ done:
 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
 					     QDF_TRACE_LEVEL_INFO);
 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			qdf_assert(0);
 			nbuf = next;
 			continue;
@@ -655,7 +633,7 @@ done:
 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
 			} else {
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
 				dp_info_rl("scatter msdu len %d, dropped",
 					   msdu_len);
@@ -678,7 +656,7 @@ done:
 		if (qdf_unlikely(vdev->multipass_en)) {
 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -689,7 +667,7 @@ done:
 			DP_STATS_INC(peer, rx.policy_check_drop, 1);
 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
 			/* Drop & free packet */
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			/* Statistics */
 			nbuf = next;
 			continue;
@@ -702,7 +680,7 @@ done:
 				  false))) {
 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			nbuf = next;
 			continue;
 		}
@@ -718,7 +696,7 @@ done:
 			if (!is_eapol) {
 				DP_STATS_INC(peer,
 					     rx.peer_unauth_rx_pkt_drop, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -745,7 +723,7 @@ done:
 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
 					     1);
 
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}

+ 1 - 1
dp/wifi3.0/dp_main.c

@@ -5386,7 +5386,7 @@ static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
 	curr_nbuf = pdev->invalid_peer_head_msdu;
 	while (curr_nbuf) {
 		next_nbuf = qdf_nbuf_next(curr_nbuf);
-		qdf_nbuf_free(curr_nbuf);
+		dp_rx_nbuf_free(curr_nbuf);
 		curr_nbuf = next_nbuf;
 	}
 	pdev->invalid_peer_head_msdu = NULL;

+ 7 - 7
dp/wifi3.0/dp_rx.c

@@ -845,7 +845,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_peer *ta_peer,
 		       ta_peer->vdev->vdev_id, nbuf_copy)) {
 		DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
 		tid_stats->fail_cnt[INTRABSS_DROP]++;
-		qdf_nbuf_free(nbuf_copy);
+		dp_rx_nbuf_free(nbuf_copy);
 	} else {
 		DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
 		tid_stats->intrabss_cnt++;
@@ -1172,7 +1172,7 @@ free:
 	curr_nbuf = mpdu;
 	while (curr_nbuf) {
 		next_nbuf = qdf_nbuf_next(curr_nbuf);
-		qdf_nbuf_free(curr_nbuf);
+		dp_rx_nbuf_free(curr_nbuf);
 		curr_nbuf = next_nbuf;
 	}
 
@@ -1257,7 +1257,7 @@ free:
 	curr_nbuf = mpdu;
 	while (curr_nbuf) {
 		next_nbuf = qdf_nbuf_next(curr_nbuf);
-		qdf_nbuf_free(curr_nbuf);
+		dp_rx_nbuf_free(curr_nbuf);
 		curr_nbuf = next_nbuf;
 	}
 
@@ -1594,7 +1594,7 @@ static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
 			stats->fail_cnt[INVALID_PEER_VDEV]++;
 			stats->delivered_to_stack--;
 		}
-		qdf_nbuf_free(buf);
+		dp_rx_nbuf_free(buf);
 		buf = next_buf;
 		num_dropped++;
 	}
@@ -2193,7 +2193,7 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
 deliver_fail:
 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 	if (vdev)
 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
 }
@@ -2202,7 +2202,7 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
 {
 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 }
 #endif
 
@@ -2288,7 +2288,7 @@ void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
 {
 	if (qdf_nbuf_is_raw_frame(nbuf)) {
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		return true;
 	}
 

+ 39 - 0
dp/wifi3.0/dp_rx.h

@@ -2230,6 +2230,16 @@ void dp_rx_nbuf_unmap(struct dp_soc *soc,
 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
 }
+
+static inline
+void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
+			   struct rx_desc_pool *rx_desc_pool,
+			   qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_dma_inv_range((void *)nbuf->data,
+			       (void *)(nbuf->data + rx_desc_pool->buf_size));
+}
+
 #else
 static inline
 void dp_rx_nbuf_unmap(struct dp_soc *soc,
@@ -2237,6 +2247,13 @@ void dp_rx_nbuf_unmap(struct dp_soc *soc,
 		      uint8_t reo_ring_num)
 {
 }
+
+static inline
+void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
+			   struct rx_desc_pool *rx_desc_pool,
+			   qdf_nbuf_t nbuf)
+{
+}
 #endif
 
 static inline
@@ -2252,6 +2269,11 @@ qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
 	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size);
 }
 
+static inline
+void  dp_rx_nbuf_free(qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_free_simple(nbuf);
+}
 #else
 static inline
 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
@@ -2325,6 +2347,17 @@ void dp_rx_nbuf_unmap(struct dp_soc *soc,
 	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
 }
 
+static inline
+void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
+			   struct rx_desc_pool *rx_desc_pool,
+			   qdf_nbuf_t nbuf)
+{
+	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
+					  false);
+	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
+				     rx_desc_pool->buf_size);
+}
+
 static inline
 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
 				 uint32_t bufs_reaped)
@@ -2341,6 +2374,12 @@ qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
 			      RX_BUFFER_RESERVATION,
 			      rx_desc_pool->buf_alignment, FALSE);
 }
+
+static inline
+void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
+{
+	qdf_nbuf_free(nbuf);
+}
 #endif
 
 static inline

+ 9 - 14
dp/wifi3.0/dp_rx_defrag.c

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -81,7 +81,7 @@ static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
 
 	while (frag) {
 		next = qdf_nbuf_next(frag);
-		qdf_nbuf_free(frag);
+		dp_rx_nbuf_free(frag);
 		frag = next;
 	}
 }
@@ -367,7 +367,7 @@ static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned ti
 							      rx_desc_info);
 
 		if (cur_fragno == head_fragno) {
-			qdf_nbuf_free(frag);
+			dp_rx_nbuf_free(frag);
 			goto insert_fail;
 		} else if (head_fragno > cur_fragno) {
 			qdf_nbuf_set_next(frag, cur);
@@ -387,7 +387,7 @@ static QDF_STATUS dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned ti
 			}
 
 			if (cur_fragno == head_fragno) {
-				qdf_nbuf_free(frag);
+				dp_rx_nbuf_free(frag);
 				goto insert_fail;
 			}
 
@@ -832,7 +832,7 @@ static QDF_STATUS dp_rx_defrag_tkip_demic(struct dp_soc *soc,
 	pktlen -= dp_f_tkip.ic_miclen;
 
 	if (((qdf_nbuf_len(prev) - hdrlen) == 0) && prev != msdu) {
-		qdf_nbuf_free(prev);
+		dp_rx_nbuf_free(prev);
 		qdf_nbuf_set_next(prev0, NULL);
 	}
 
@@ -1943,7 +1943,7 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 	return QDF_STATUS_SUCCESS;
 
 discard_frag:
-	qdf_nbuf_free(frag);
+	dp_rx_nbuf_free(frag);
 err_free_desc:
 	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
 	if (dp_rx_link_desc_return(soc, ring_desc,
@@ -2026,12 +2026,7 @@ uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
 		return rx_bufs_used;
 
 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
-	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
-					  rx_desc_pool->buf_size,
-					  false);
-	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
-				     QDF_DMA_FROM_DEVICE,
-				     rx_desc_pool->buf_size);
+	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
 	rx_desc->unmapped = 1;
 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
@@ -2082,7 +2077,7 @@ QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
 			peer->peer_id,
 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
 		DP_STATS_INC(soc, rx.err.defrag_peer_uninit, 1);
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		goto fail;
 	}
 
@@ -2096,7 +2091,7 @@ QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 			  "%s: No list found for TID %d Seq# %d",
 				__func__, tid, rxseq);
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		goto fail;
 	}
 

+ 7 - 20
dp/wifi3.0/dp_rx_desc.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -220,14 +221,14 @@ static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
 			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
 					     QDF_DMA_BIDIRECTIONAL, buf_size);
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		nbuf = next;
 	}
 
 	nbuf = nbuf_free_list;
 	while (nbuf) {
 		next = nbuf->next;
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		nbuf = next;
 	}
 }
@@ -410,17 +411,10 @@ void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
 			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
 
 			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
-				dp_ipa_handle_rx_buf_smmu_mapping(
-							soc, nbuf,
-							rx_desc_pool->buf_size,
-							false);
-				qdf_nbuf_unmap_nbytes_single(
-							soc->osdev, nbuf,
-							QDF_DMA_FROM_DEVICE,
-							rx_desc_pool->buf_size);
+				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
 				rx_desc_pool->array[i].rx_desc.unmapped = 1;
 			}
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 		}
 	}
 	qdf_mem_free(rx_desc_pool->array);
@@ -440,17 +434,10 @@ void dp_rx_desc_nbuf_free(struct dp_soc *soc,
 			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
 
 			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
-				dp_ipa_handle_rx_buf_smmu_mapping(
-						soc, nbuf,
-						rx_desc_pool->buf_size,
-						false);
-				qdf_nbuf_unmap_nbytes_single(
-							soc->osdev, nbuf,
-							QDF_DMA_FROM_DEVICE,
-							rx_desc_pool->buf_size);
+				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
 				rx_desc_pool->array[i].rx_desc.unmapped = 1;
 			}
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 		}
 	}
 	qdf_spin_unlock_bh(&rx_desc_pool->lock);

+ 36 - 81
dp/wifi3.0/dp_rx_err.c

@@ -354,12 +354,7 @@ more_msdu_link_desc:
 
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
-		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
-						  rx_desc_pool->buf_size,
-						  false);
-		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
-					     QDF_DMA_FROM_DEVICE,
-					     rx_desc_pool->buf_size);
+		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
 		rx_desc->unmapped = 1;
 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
@@ -518,7 +513,7 @@ free_nbuf:
 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
 
 	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 }
 
 /**
@@ -732,12 +727,7 @@ more_msdu_link_desc:
 
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
-		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
-						  rx_desc_pool->buf_size,
-						  false);
-		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
-					     QDF_DMA_FROM_DEVICE,
-					     rx_desc_pool->buf_size);
+		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
 		rx_desc->unmapped = 1;
 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
@@ -772,7 +762,7 @@ more_msdu_link_desc:
 			if (QDF_IS_STATUS_ERROR(status)) {
 				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
 					     1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				goto process_next_msdu;
 			}
 
@@ -810,7 +800,7 @@ more_msdu_link_desc:
 			break;
 		default:
 			dp_err_rl("Non-support error code %d", err_code);
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 		}
 
 process_next_msdu:
@@ -927,7 +917,7 @@ dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
 		while (curr_nbuf) {
 			tmp_nbuf = curr_nbuf->next;
-			qdf_nbuf_free(curr_nbuf);
+			dp_rx_nbuf_free(curr_nbuf);
 			curr_nbuf = tmp_nbuf;
 		}
 
@@ -992,12 +982,7 @@ dp_rx_bar_frame_handle(struct dp_soc *soc,
 	nbuf = rx_desc->nbuf;
 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
-	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
-					  rx_desc_pool->buf_size,
-					  false);
-	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
-				     QDF_DMA_FROM_DEVICE,
-				     rx_desc_pool->buf_size);
+	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
 	rx_desc->unmapped = 1;
 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
@@ -1100,7 +1085,7 @@ free_nbuf:
 	if (peer)
 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
 	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 }
 
 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
@@ -1149,7 +1134,7 @@ dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
 				     QDF_TRACE_LEVEL_DEBUG);
 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
 				 1, qdf_nbuf_len(nbuf));
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 
 		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
 		return true;
@@ -1463,7 +1448,7 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
 	return QDF_STATUS_SUCCESS;
 
 drop_nbuf:
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 	return QDF_STATUS_E_FAILURE;
 }
 
@@ -1513,7 +1498,7 @@ dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
 
 	if (dp_rx_check_pkt_len(soc, pkt_len)) {
 		/* Drop & free packet */
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		return;
 	}
 	/* Set length in nbuf */
@@ -1538,7 +1523,7 @@ dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
 		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
 				 vdev);
 		/* Drop & free packet */
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
 		return;
 	}
@@ -1587,7 +1572,7 @@ dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
 process_mesh:
 
 	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
-		qdf_nbuf_free(nbuf);
+		dp_rx_nbuf_free(nbuf);
 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
 		return;
 	}
@@ -1598,7 +1583,7 @@ process_mesh:
 			dp_rx_err_info("%pK: mesh pkt filtered", soc);
 			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);
 
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			return;
 		}
 		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
@@ -1721,7 +1706,7 @@ void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
 				   &mic_failure_info);
 
 fail:
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 	return;
 }
 
@@ -1905,7 +1890,7 @@ drop_nbuf:
 	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
 		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
 
-	qdf_nbuf_free(nbuf);
+	dp_rx_nbuf_free(nbuf);
 }
 
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
@@ -1997,6 +1982,7 @@ static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
 	struct dp_rx_desc *rx_desc;
 	struct hal_buf_info hbi;
 	struct dp_pdev *pdev;
+	struct rx_desc_pool *rx_desc_pool;
 
 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
 
@@ -2021,17 +2007,11 @@ static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
 		goto assert_return;
 	}
 
+	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
 	/* After this point the rx_desc and nbuf are valid */
 	dp_ipa_rx_buf_smmu_mapping_lock(soc);
 	qdf_assert_always(!rx_desc->unmapped);
-	dp_ipa_handle_rx_buf_smmu_mapping(soc,
-					  rx_desc->nbuf,
-					  RX_DATA_BUFFER_SIZE,
-					  false);
-	qdf_nbuf_unmap_nbytes_single(soc->osdev,
-				     rx_desc->nbuf,
-				     QDF_DMA_FROM_DEVICE,
-				     RX_DATA_BUFFER_SIZE);
+	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
 	rx_desc->unmapped = 1;
 	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
@@ -2564,12 +2544,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
 		nbuf = rx_desc->nbuf;
 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
-		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
-						  rx_desc_pool->buf_size,
-						  false);
-		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
-					     QDF_DMA_FROM_DEVICE,
-					     rx_desc_pool->buf_size);
+		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
 		rx_desc->unmapped = 1;
 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
@@ -2707,7 +2682,7 @@ done:
 			 * SG error handling is not done correctly,
 			 * drop SG frames for now.
 			 */
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			dp_info_rl("scattered msdu dropped");
 			nbuf = next;
 			if (peer)
@@ -2786,7 +2761,7 @@ done:
 						dp_rx_err_handle_bar(soc,
 								     peer,
 								     nbuf);
-					qdf_nbuf_free(nbuf);
+					dp_rx_nbuf_free(nbuf);
 					break;
 
 				case HAL_REO_ERR_PN_CHECK_FAILED:
@@ -2794,13 +2769,13 @@ done:
 					if (peer)
 						DP_STATS_INC(peer,
 							     rx.err.pn_err, 1);
-					qdf_nbuf_free(nbuf);
+					dp_rx_nbuf_free(nbuf);
 					break;
 
 				default:
 					dp_info_rl("Got pkt with REO ERROR: %d",
 						   wbm_err_info.reo_err_code);
-					qdf_nbuf_free(nbuf);
+					dp_rx_nbuf_free(nbuf);
 				}
 			} else if (wbm_err_info.reo_psh_rsn
 					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
@@ -2811,7 +2786,7 @@ done:
 				/* should not enter here */
 				dp_rx_err_alert("invalid reo push reason %u",
 						wbm_err_info.reo_psh_rsn);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				qdf_assert_always(0);
 			}
 		} else if (wbm_err_info.wbm_err_src ==
@@ -2859,12 +2834,12 @@ done:
 					if (peer) {
 						DP_STATS_INC(peer, rx.err.
 							     decrypt_err, 1);
-						qdf_nbuf_free(nbuf);
+						dp_rx_nbuf_free(nbuf);
 						break;
 					}
 
 					if (!dp_handle_rxdma_decrypt_err()) {
-						qdf_nbuf_free(nbuf);
+						dp_rx_nbuf_free(nbuf);
 						break;
 					}
 
@@ -2879,7 +2854,7 @@ done:
 				case HAL_RXDMA_MULTICAST_ECHO:
 					DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
 							 qdf_nbuf_len(nbuf));
-					qdf_nbuf_free(nbuf);
+					dp_rx_nbuf_free(nbuf);
 					break;
 				case HAL_RXDMA_UNAUTHORIZED_WDS:
 					pool_id = wbm_err_info.pool_id;
@@ -2891,7 +2866,7 @@ done:
 								pool_id);
 					break;
 				default:
-					qdf_nbuf_free(nbuf);
+					dp_rx_nbuf_free(nbuf);
 					dp_err_rl("RXDMA error %d",
 						  wbm_err_info.rxdma_err_code);
 				}
@@ -2905,12 +2880,12 @@ done:
 				dp_rx_err_err("rxdma push reason %u",
 						wbm_err_info.rxdma_psh_rsn);
 				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 			} else {
 				/* should not enter here */
 				dp_rx_err_alert("invalid rxdma push reason %u",
 						wbm_err_info.rxdma_psh_rsn);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				qdf_assert_always(0);
 			}
 		} else {
@@ -3058,14 +3033,8 @@ dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
 					rx_desc_pool = &soc->
 						rx_desc_buf[rx_desc->pool_id];
 					dp_ipa_rx_buf_smmu_mapping_lock(soc);
-					dp_ipa_handle_rx_buf_smmu_mapping(
-							soc, msdu,
-							rx_desc_pool->buf_size,
-							false);
-					qdf_nbuf_unmap_nbytes_single(
-						soc->osdev, msdu,
-						QDF_DMA_FROM_DEVICE,
-						rx_desc_pool->buf_size);
+					dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
+							      msdu);
 					rx_desc->unmapped = 1;
 					dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
@@ -3223,16 +3192,7 @@ dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
 				msdu = rx_desc->nbuf;
 
 				dp_ipa_rx_buf_smmu_mapping_lock(soc);
-				dp_ipa_handle_rx_buf_smmu_mapping(
-						soc, msdu,
-						rx_desc_pool->buf_size,
-						false);
-
-				qdf_nbuf_unmap_nbytes_single(
-							soc->osdev,
-							msdu,
-							QDF_DMA_FROM_DEVICE,
-							rx_desc_pool->buf_size);
+				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
 				rx_desc->unmapped = 1;
 				dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 
@@ -3315,13 +3275,8 @@ dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
 		if (rx_desc && rx_desc->nbuf) {
 			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
 			dp_ipa_rx_buf_smmu_mapping_lock(soc);
-			dp_ipa_handle_rx_buf_smmu_mapping(
-						soc, rx_desc->nbuf,
-						rx_desc_pool->buf_size,
-						false);
-			qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
-						     QDF_DMA_FROM_DEVICE,
-						     rx_desc_pool->buf_size);
+			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
+					      rx_desc->nbuf);
 			rx_desc->unmapped = 1;
 			dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 

+ 12 - 25
dp/wifi3.0/li/dp_li_rx.c

@@ -348,21 +348,8 @@ more_data:
 		if (QDF_IS_STATUS_ERROR(status)) {
 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
 				qdf_assert_always(!rx_desc->unmapped);
-				dp_ipa_reo_ctx_buf_mapping_lock(soc,
-								reo_ring_num);
-				dp_ipa_handle_rx_buf_smmu_mapping(
-							soc,
-							rx_desc->nbuf,
-							RX_DATA_BUFFER_SIZE,
-							false);
-				qdf_nbuf_unmap_nbytes_single(
-							soc->osdev,
-							rx_desc->nbuf,
-							QDF_DMA_FROM_DEVICE,
-							RX_DATA_BUFFER_SIZE);
+				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
 				rx_desc->unmapped = 1;
-				dp_ipa_reo_ctx_buf_mapping_unlock(soc,
-								  reo_ring_num);
 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
 							    rx_desc->pool_id);
 				dp_rx_add_to_free_desc_list(
@@ -608,7 +595,7 @@ done:
 			tid = qdf_nbuf_get_tid_val(nbuf);
 			if (tid >= CDP_MAX_DATA_TIDS) {
 				DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -679,13 +666,13 @@ done:
 						     QDF_TRACE_LEVEL_INFO);
 				tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
 				qdf_assert(0);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			} else if (qdf_unlikely(hal_rx_attn_msdu_len_err_get_li(
 								 rx_tlv_hdr))) {
 				DP_STATS_INC(soc, rx.err.msdu_len_err, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -744,7 +731,7 @@ done:
 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
 			} else {
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
 				dp_info_rl("scatter msdu len %d, dropped",
 					   msdu_len);
@@ -767,7 +754,7 @@ done:
 		if (qdf_unlikely(vdev->multipass_en)) {
 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -778,7 +765,7 @@ done:
 			DP_STATS_INC(peer, rx.policy_check_drop, 1);
 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
 			/* Drop & free packet */
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			/* Statistics */
 			nbuf = next;
 			continue;
@@ -791,7 +778,7 @@ done:
 				  false))) {
 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
-			qdf_nbuf_free(nbuf);
+			dp_rx_nbuf_free(nbuf);
 			nbuf = next;
 			continue;
 		}
@@ -807,7 +794,7 @@ done:
 			if (!is_eapol) {
 				DP_STATS_INC(peer,
 					     rx.peer_unauth_rx_pkt_drop, 1);
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -834,7 +821,7 @@ done:
 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
 					     1);
 
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}
@@ -856,7 +843,7 @@ done:
 			 */
 			if (!is_sa_da_idx_valid(max_ast, nbuf,
 						msdu_metadata)) {
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
 				continue;
@@ -868,7 +855,7 @@ done:
 				/* this is a looped back MCBC pkt,drop it */
 				DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
 						 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
-				qdf_nbuf_free(nbuf);
+				dp_rx_nbuf_free(nbuf);
 				nbuf = next;
 				continue;
 			}

+ 3 - 0
qdf/inc/qdf_nbuf.h

@@ -1790,6 +1790,9 @@ qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
 					    const char *func, uint32_t line);
 
+#define qdf_nbuf_free_simple(d) \
+	__qdf_nbuf_free(d)
+
 #define qdf_nbuf_free(d) \
 	qdf_nbuf_free_debug(d, __func__, __LINE__)