
qcacmn: Handle frag based ops based on rx_desc_pool flag

Rx desc pool has a flag to identify whether frag or nbuf
operations need to be performed for the alloc, map, prep, and
free of the monitor destination buffer.

This flag is set only for the mon destination desc pool, and
only if the RX_MON_MEM_FRAG feature is enabled.
In all other cases it is set to zero and the default nbuf
operations are used.
The flag is initialized during pdev rx_desc_pool
initialization and reset during pdev deinit.

The mon destination buffer supports frags when the
RX_MON_MEM_FRAG flag is set.

Change-Id: I67c6c823ee4f114035b884c024a1a9054a40665b
CRs-Fixed: 2741757
Ankit Kumar, 5 years ago
parent
commit
526bbe81de
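
In brief, the replenish and attach paths now branch on the new pool flag. A minimal sketch of that dispatch, using only names introduced by this change (surrounding variables and error handling elided):

	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	QDF_STATUS ret;

	/* Flag is set during pdev rx_desc_pool initialization */
	if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
		/* frag path: qdf_frag_alloc() + qdf_mem_map_page() */
		ret = dp_pdev_frag_alloc_and_map(dp_soc, &nbuf_frag_info,
						 dp_pdev, rx_desc_pool);
	else
		/* nbuf path: buffer pool nbuf alloc + nbuf map */
		ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc, mac_id,
							   num_entries_avail,
							   &nbuf_frag_info,
							   dp_pdev,
							   rx_desc_pool);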

+ 2 - 0
dp/inc/cdp_txrx_stats_struct.h

@@ -1801,6 +1801,7 @@ struct cdp_cfr_rcc_stats {
  * @pkts: total packets replenished
  * @rxdma_err: rxdma errors for replenished
  * @nbuf_alloc_fail: nbuf alloc failed
+ * @frag_alloc_fail: frag alloc failed
  * @map_err: Mapping failure
  * @x86_fail: x86 failures
  * @low_thresh_intrs: low threshold interrupts
@@ -1846,6 +1847,7 @@ struct cdp_pdev_stats {
 		struct cdp_pkt_info pkts;
 		uint32_t rxdma_err;
 		uint32_t nbuf_alloc_fail;
+		uint32_t frag_alloc_fail;
 		uint32_t map_err;
 		uint32_t x86_fail;
 		uint32_t low_thresh_intrs;

+ 230 - 64
dp/wifi3.0/dp_rx.c

@@ -136,6 +136,129 @@ QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
 }
 #endif
 
+/**
+ * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
+ *
+ * @dp_soc: struct dp_soc *
+ * @nbuf_frag_info_t: nbuf frag info
+ * @dp_pdev: struct dp_pdev *
+ * @rx_desc_pool: Rx desc pool
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef DP_RX_MON_MEM_FRAG
+static inline QDF_STATUS
+dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
+			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
+			   struct dp_pdev *dp_pdev,
+			   struct rx_desc_pool *rx_desc_pool)
+{
+	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
+
+	(nbuf_frag_info_t->virt_addr).vaddr =
+			qdf_frag_alloc(rx_desc_pool->buf_size);
+
+	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
+		dp_err("Frag alloc failed");
+		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	ret = qdf_mem_map_page(dp_soc->osdev,
+			       (nbuf_frag_info_t->virt_addr).vaddr,
+			       QDF_DMA_FROM_DEVICE,
+			       rx_desc_pool->buf_size,
+			       &nbuf_frag_info_t->paddr);
+
+	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
+		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
+		dp_err("Frag map failed");
+		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS
+dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
+			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
+			   struct dp_pdev *dp_pdev,
+			   struct rx_desc_pool *rx_desc_pool)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif /* DP_RX_MON_MEM_FRAG */
+
+/**
+ * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
+ *
+ * @dp_soc: struct dp_soc *
+ * @mac_id: MAC id
+ * @num_entries_avail: number of entries available in the ring
+ * @nbuf_frag_info_t: nbuf frag info
+ * @dp_pdev: struct dp_pdev *
+ * @rx_desc_pool: Rx desc pool
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
+				     uint32_t mac_id,
+				     uint32_t num_entries_avail,
+				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
+				     struct dp_pdev *dp_pdev,
+				     struct rx_desc_pool *rx_desc_pool)
+{
+	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
+
+	(nbuf_frag_info_t->virt_addr).nbuf =
+		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
+					     mac_id,
+					     rx_desc_pool,
+					     num_entries_avail);
+	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
+		dp_err("nbuf alloc failed");
+		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
+					 (nbuf_frag_info_t->virt_addr).nbuf,
+					 QDF_DMA_FROM_DEVICE,
+					 rx_desc_pool->buf_size);
+
+	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
+		dp_rx_buffer_pool_nbuf_free(dp_soc,
+			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
+		dp_err("nbuf map failed");
+		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	nbuf_frag_info_t->paddr =
+		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
+
+	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
+			(qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
+					  rx_desc_pool->buf_size,
+					  true);
+
+	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
+			      &nbuf_frag_info_t->paddr,
+			      rx_desc_pool);
+	if (ret == QDF_STATUS_E_FAILURE) {
+		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
+					     (nbuf_frag_info_t->virt_addr).nbuf,
+					     QDF_DMA_FROM_DEVICE,
+					     rx_desc_pool->buf_size);
+		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
+		return QDF_STATUS_E_ADDRNOTAVAIL;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
 /*
  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  *			       called during dp rx initialization
@@ -167,13 +290,10 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 	uint32_t num_entries_avail;
 	uint32_t count;
 	int sync_hw_ptr = 1;
-	qdf_dma_addr_t paddr;
-	qdf_nbuf_t rx_netbuf;
+	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
 	void *rxdma_ring_entry;
 	union dp_rx_desc_list_elem_t *next;
 	QDF_STATUS ret;
-	uint16_t buf_size = rx_desc_pool->buf_size;
-
 	void *rxdma_srng;
 
 	rxdma_srng = dp_rxdma_srng->hal_srng;
@@ -239,37 +359,21 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 	count = 0;
 
 	while (count < num_req_buffers) {
-		rx_netbuf = dp_rx_buffer_pool_nbuf_alloc(dp_soc, mac_id,
-							 rx_desc_pool,
-							 num_entries_avail);
-
-		if (qdf_unlikely(!rx_netbuf)) {
-			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
-			break;
-		}
-
-		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, rx_netbuf,
-						 QDF_DMA_FROM_DEVICE, buf_size);
+		/* Flag is set during pdev rx_desc_pool initialization */
+		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
+			ret = dp_pdev_frag_alloc_and_map(dp_soc,
+							 &nbuf_frag_info,
+							 dp_pdev,
+							 rx_desc_pool);
+		else
+			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
+								   mac_id,
+					num_entries_avail, &nbuf_frag_info,
+					dp_pdev, rx_desc_pool);
 
 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
-			dp_rx_buffer_pool_nbuf_free(dp_soc, rx_netbuf, mac_id);
-			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
-			continue;
-		}
-
-		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
-
-		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf,
-						  buf_size, true);
-		/*
-		 * check if the physical address of nbuf->data is
-		 * less then 0x50000000 then free the nbuf and try
-		 * allocating new nbuf. We can try for 100 times.
-		 * this is a temp WAR till we fix it properly.
-		 */
-		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool);
-		if (ret == QDF_STATUS_E_FAILURE) {
-			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
+			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
+				continue;
 			break;
 		}
 
@@ -281,7 +385,13 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 
 		next = (*desc_list)->next;
 
-		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
+		/* Flag is set during pdev rx_desc_pool initialization */
+		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
+			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
+					     &nbuf_frag_info);
+		else
+			dp_rx_desc_prep(&((*desc_list)->rx_desc),
+					&nbuf_frag_info);
 
 		/* rx_desc.in_use should be zero at this time*/
 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
@@ -290,12 +400,13 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 		(*desc_list)->rx_desc.in_err_state = 0;
 		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
 					   func_name, RX_DESC_REPLENISHED);
-		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
-				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
-				 (unsigned long long)paddr,
+		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
+				 nbuf_frag_info.virt_addr.nbuf,
+				 (unsigned long long)(nbuf_frag_info.paddr),
 				 (*desc_list)->rx_desc.cookie);
 
-		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
+		hal_rxdma_buff_addr_info_set(rxdma_ring_entry,
+					     nbuf_frag_info.paddr,
 						(*desc_list)->rx_desc.cookie,
 						rx_desc_pool->owner);
 
@@ -2698,41 +2809,47 @@ QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
 }
 
 static QDF_STATUS
-dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
+dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
+			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
 			   struct dp_pdev *dp_pdev,
 			   struct rx_desc_pool *rx_desc_pool)
 {
-	qdf_dma_addr_t paddr;
 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
 
-	*nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
+	(nbuf_frag_info_t->virt_addr).nbuf =
+		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
 			       RX_BUFFER_RESERVATION,
 			       rx_desc_pool->buf_alignment, FALSE);
-	if (!(*nbuf)) {
+	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
 		dp_err("nbuf alloc failed");
 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
 		return ret;
 	}
 
-	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, *nbuf,
+	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
+					 (nbuf_frag_info_t->virt_addr).nbuf,
 					 QDF_DMA_FROM_DEVICE,
 					 rx_desc_pool->buf_size);
 
 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
-		qdf_nbuf_free(*nbuf);
+		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
 		dp_err("nbuf map failed");
 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
 		return ret;
 	}
 
-	paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
+	nbuf_frag_info_t->paddr =
+		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
 
-	ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool);
+	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
+			      &nbuf_frag_info_t->paddr,
+			      rx_desc_pool);
 	if (ret == QDF_STATUS_E_FAILURE) {
-		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, *nbuf,
+		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
+					     (nbuf_frag_info_t->virt_addr).nbuf,
 					     QDF_DMA_FROM_DEVICE,
 					     rx_desc_pool->buf_size);
-		qdf_nbuf_free(*nbuf);
+		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
 		dp_err("nbuf check x86 failed");
 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
 		return ret;
@@ -2752,7 +2869,7 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 	union dp_rx_desc_list_elem_t *next;
 	void *rxdma_ring_entry;
 	qdf_dma_addr_t paddr;
-	qdf_nbuf_t *rx_nbuf_arr;
+	struct dp_rx_nbuf_frag_info *nf_info;
 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
 	uint32_t buffer_index, nbuf_ptrs_per_page;
 	qdf_nbuf_t nbuf;
@@ -2802,24 +2919,24 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 	 * have been allocated to fit in one page across each
 	 * iteration to index into the nbuf.
 	 */
-	total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
+	total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE;
 
 	/*
 	 * Add an extra page to store the remainder if any
 	 */
-	if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
+	if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE)
 		total_pages++;
-	rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
-	if (!rx_nbuf_arr) {
+	nf_info = qdf_mem_malloc(PAGE_SIZE);
+	if (!nf_info) {
 		dp_err("failed to allocate nbuf array");
 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
 		QDF_BUG(0);
 		return QDF_STATUS_E_NOMEM;
 	}
-	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
+	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info);
 
 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
-		qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
+		qdf_mem_zero(nf_info, PAGE_SIZE);
 
 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
 			/*
@@ -2830,9 +2947,15 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 			 */
 			if (nr_nbuf_total >= nr_descs)
 				break;
-			ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
-							 &rx_nbuf_arr[nr_nbuf],
-							 dp_pdev, rx_desc_pool);
+			/* Flag is set during pdev rx_desc_pool initialization */
+			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
+				ret = dp_pdev_frag_alloc_and_map(dp_soc,
+						&nf_info[nr_nbuf], dp_pdev,
+						rx_desc_pool);
+			else
+				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
+						&nf_info[nr_nbuf], dp_pdev,
+						rx_desc_pool);
 			if (QDF_IS_STATUS_ERROR(ret))
 				break;
 
@@ -2848,10 +2971,16 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 			qdf_assert_always(rxdma_ring_entry);
 
 			next = desc_list->next;
-			nbuf = rx_nbuf_arr[buffer_index];
-			paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
-
-			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
+			paddr = nf_info[buffer_index].paddr;
+			nbuf = nf_info[buffer_index].virt_addr.nbuf;
+
+			/* Flag is set during pdev rx_desc_pool initialization */
+			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
+				dp_rx_desc_frag_prep(&desc_list->rx_desc,
+						     &nf_info[buffer_index]);
+			else
+				dp_rx_desc_prep(&desc_list->rx_desc,
+						&nf_info[buffer_index]);
 			desc_list->rx_desc.in_use = 1;
 			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
 			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
@@ -2861,7 +2990,6 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
 						     desc_list->rx_desc.cookie,
 						     rx_desc_pool->owner);
-
 			dp_ipa_handle_rx_buf_smmu_mapping(
 						dp_soc, nbuf,
 						rx_desc_pool->buf_size,
@@ -2874,7 +3002,7 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 	}
 
 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
-	qdf_mem_free(rx_nbuf_arr);
+	qdf_mem_free(nf_info);
 
 	if (!nr_nbuf_total) {
 		dp_err("No nbuf's allocated");
@@ -2890,6 +3018,35 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
 	return QDF_STATUS_SUCCESS;
 }
 
+/**
+ * dp_rx_enable_mon_dest_frag() - Enable frag processing for
+ *              the monitor destination ring
+ * @rx_desc_pool: Rx desc pool
+ * @is_mon_dest_desc: Is it for monitor dest buffer
+ *
+ * Enable this flag only for monitor destination buffer processing,
+ * and only if the DP_RX_MON_MEM_FRAG feature is enabled.
+ * If the flag is set, frag-based functions are called for the alloc,
+ * map, prep desc, and free buffer ops; otherwise the normal nbuf-based
+ * functions are called.
+ *
+ * Return: None
+ */
+#ifdef DP_RX_MON_MEM_FRAG
+void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
+				bool is_mon_dest_desc)
+{
+	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
+}
+#else
+void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
+				bool is_mon_dest_desc)
+{
+	rx_desc_pool->rx_mon_dest_frag_enable = false;
+}
+#endif
+
 /*
  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
  *				   pool
@@ -2966,6 +3123,13 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
 	struct rx_desc_pool *rx_desc_pool;
 
 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
+		/**
+		 * If NSS is enabled, rx_desc_pool is already filled.
+		 * Hence, just disable desc_pool frag flag.
+		 */
+		rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
+		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
+
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
 			  "nss-wifi<4> skip Rx refil %d", mac_for_pdev);
 		return QDF_STATUS_SUCCESS;
@@ -2986,6 +3150,8 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
 	rx_desc_pool->owner = DP_WBM2SW_RBM;
 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
+	/* Disable monitor dest processing via frag */
+	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
 
 	dp_rx_desc_pool_init(soc, mac_for_pdev,
 			     rx_sw_desc_num, rx_desc_pool);

+ 80 - 5
dp/wifi3.0/dp_rx.h

@@ -97,6 +97,8 @@ struct dp_rx_desc_dbg_info {
  * @nbuf		: VA of the "skb" posted
  * @rx_buf_start	: VA of the original Rx buffer, before
  *			  movement of any skb->data pointer
+ * @paddr_buf_start     : PA of the original Rx buffer, before
+ *                        movement of any frag pointer
  * @cookie		: index into the sw array which holds
  *			  the sw Rx descriptors
  *			  Cookie space is 21 bits:
@@ -113,6 +115,7 @@ struct dp_rx_desc_dbg_info {
 struct dp_rx_desc {
 	qdf_nbuf_t nbuf;
 	uint8_t *rx_buf_start;
+	qdf_dma_addr_t paddr_buf_start;
 	uint32_t cookie;
 	uint8_t	 pool_id;
 #ifdef RX_DESC_DEBUG_CHECK
@@ -683,6 +686,25 @@ void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
 			  struct rx_desc_pool *rx_desc_pool);
 
+#ifdef DP_RX_MON_MEM_FRAG
+/*
+ * dp_rx_desc_frag_free() - free the sw rx desc frag called during
+ *			    de-initialization of wifi module.
+ *
+ * @soc: core txrx main context
+ * @rx_desc_pool: rx descriptor pool pointer
+ *
+ * Return: None
+ */
+void dp_rx_desc_frag_free(struct dp_soc *soc,
+			  struct rx_desc_pool *rx_desc_pool);
+#else
+static inline
+void dp_rx_desc_frag_free(struct dp_soc *soc,
+			  struct rx_desc_pool *rx_desc_pool)
+{
+}
+#endif
 /*
  * dp_rx_desc_pool_free() - free the sw rx desc array called during
  *			    de-initialization of wifi module.
@@ -1278,19 +1300,48 @@ static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
 /**
  * dp_rx_desc_prep() - prepare rx desc
  * @rx_desc: rx descriptor pointer to be prepared
- * @nbuf: nbuf to be associated with rx_desc
+ * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
  *
  * Note: assumption is that we are associating a nbuf which is mapped
  *
  * Return: none
  */
-static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
+static inline
+void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
+		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
 {
 	rx_desc->magic = DP_RX_DESC_MAGIC;
-	rx_desc->nbuf = nbuf;
+	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
 	rx_desc->unmapped = 0;
 }
 
+/**
+ * dp_rx_desc_frag_prep() - prepare rx desc
+ * @rx_desc: rx descriptor pointer to be prepared
+ * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
+ *
+ * Note: assumption is that the frag address is mapped
+ *
+ * Return: none
+ */
+#ifdef DP_RX_MON_MEM_FRAG
+static inline
+void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
+			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
+{
+	rx_desc->magic = DP_RX_DESC_MAGIC;
+	rx_desc->rx_buf_start =
+		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
+	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
+	rx_desc->unmapped = 0;
+}
+#else
+static inline
+void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
+			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
+{
+}
+#endif /* DP_RX_MON_MEM_FRAG */
 #else
 
 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
@@ -1298,13 +1349,37 @@ static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
 	return true;
 }
 
-static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
+static inline
+void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
+		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
 {
-	rx_desc->nbuf = nbuf;
+	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
 	rx_desc->unmapped = 0;
 }
+
+#ifdef DP_RX_MON_MEM_FRAG
+static inline
+void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
+			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
+{
+	rx_desc->rx_buf_start =
+		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
+	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
+	rx_desc->unmapped = 0;
+}
+#else
+static inline
+void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
+			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
+{
+}
+#endif /* DP_RX_MON_MEM_FRAG */
+
 #endif /* RX_DESC_DEBUG_CHECK */
 
+void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
+				bool is_mon_dest_desc);
+
 void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
 			     uint8_t err_code, uint8_t mac_id);

+ 43 - 0
dp/wifi3.0/dp_rx_desc.c

@@ -229,6 +229,10 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc,
 
 	rx_desc_pool->freelist = NULL;
 	rx_desc_pool->pool_size = 0;
+
+	/* Deinitialize rx mon desc frag flag */
+	rx_desc_pool->rx_mon_dest_frag_enable = false;
+
 	qdf_spin_unlock_bh(&rx_desc_pool->lock);
 	qdf_spinlock_destroy(&rx_desc_pool->lock);
 }
@@ -374,6 +378,41 @@ void dp_rx_desc_nbuf_free(struct dp_soc *soc,
 	qdf_spin_unlock_bh(&rx_desc_pool->lock);
 }
 
+/**
+ * dp_rx_desc_frag_free() - Free desc frag buffer
+ *
+ * @soc: core txrx main context
+ * @rx_desc_pool: rx descriptor pool pointer
+ *
+ * Return: None
+ */
+#ifdef DP_RX_MON_MEM_FRAG
+void dp_rx_desc_frag_free(struct dp_soc *soc,
+			  struct rx_desc_pool *rx_desc_pool)
+{
+	qdf_dma_addr_t paddr;
+	qdf_frag_t vaddr;
+	int i;
+
+	qdf_spin_lock_bh(&rx_desc_pool->lock);
+	for (i = 0; i < rx_desc_pool->pool_size; i++) {
+		if (rx_desc_pool->array[i].rx_desc.in_use) {
+			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
+			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;
+
+			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
+				qdf_mem_unmap_page(soc->osdev, paddr,
+						   rx_desc_pool->buf_size,
+						   QDF_DMA_FROM_DEVICE);
+				rx_desc_pool->array[i].rx_desc.unmapped = 1;
+			}
+			qdf_frag_free(vaddr);
+		}
+	}
+	qdf_spin_unlock_bh(&rx_desc_pool->lock);
+}
+#endif
+
 void dp_rx_desc_pool_free(struct dp_soc *soc,
 			  struct rx_desc_pool *rx_desc_pool)
 {
@@ -387,6 +426,10 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc,
 
 	rx_desc_pool->freelist = NULL;
 	rx_desc_pool->pool_size = 0;
+
+	/* Deinitialize rx mon desc frag flag */
+	rx_desc_pool->rx_mon_dest_frag_enable = false;
+
 	qdf_spin_unlock_bh(&rx_desc_pool->lock);
 	qdf_spinlock_destroy(&rx_desc_pool->lock);
 }

+ 6 - 1
dp/wifi3.0/dp_rx_mon_dest.c

@@ -1244,6 +1244,8 @@ dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
 	rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
 	rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
+	/* Enable frag processing if feature is enabled */
+	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);
 
 	dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
 
@@ -1339,7 +1341,10 @@ void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
 
 	dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);
 
-	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
+	if (rx_desc_pool->rx_mon_dest_frag_enable)
+		dp_rx_desc_frag_free(soc, rx_desc_pool);
+	else
+		dp_rx_desc_nbuf_free(soc, rx_desc_pool);
 }
 
 static QDF_STATUS

+ 2 - 0
dp/wifi3.0/dp_rx_mon_status.c

@@ -2029,6 +2029,8 @@ dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
+	/* Disable frag processing flag */
+	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
 
 	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);
 

+ 2 - 0
dp/wifi3.0/dp_stats.c

@@ -6282,6 +6282,8 @@ dp_print_pdev_rx_stats(struct dp_pdev *pdev)
 		       pdev->stats.err.ip_csum_err);
 	DP_PRINT_STATS("	TCP/UDP checksum error = %u",
 		       pdev->stats.err.tcp_udp_csum_err);
+	DP_PRINT_STATS("	Failed frag alloc = %u",
+		       pdev->stats.replenish.frag_alloc_fail);
 
 	/* Get bar_recv_cnt */
 	dp_aggregate_pdev_ctrl_frames_stats(pdev);

+ 17 - 0
dp/wifi3.0/dp_types.h

@@ -296,6 +296,21 @@ enum dp_cpu_ring_map_types {
 	DP_NSS_CPU_RING_MAP_MAX
 };
 
+/**
+ * struct dp_rx_nbuf_frag_info - Holds the vaddr and paddr of a buffer
+ *
+ * @paddr: Physical address of the allocated buffer
+ * @nbuf: Allocated nbuf (nbuf approach)
+ * @vaddr: Virtual address of the allocated frag (frag approach)
+ */
+struct dp_rx_nbuf_frag_info {
+	qdf_dma_addr_t paddr;
+	union {
+		qdf_nbuf_t nbuf;
+		qdf_frag_t vaddr;
+	} virt_addr;
+};
+
 /**
  * struct rx_desc_pool
  * @pool_size: number of RX descriptor in the pool
@@ -307,6 +322,7 @@ enum dp_cpu_ring_map_types {
  * @owner: owner for nbuf
  * @buf_size: Buffer size
  * @buf_alignment: Buffer alignment
+ * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
  */
 struct rx_desc_pool {
 	uint32_t pool_size;
@@ -321,6 +337,7 @@ struct rx_desc_pool {
 	uint8_t owner;
 	uint16_t buf_size;
 	uint8_t buf_alignment;
+	bool rx_mon_dest_frag_enable;
 };
 
 /**
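
Taken together, the flag drives every stage of the mon destination buffer lifecycle. A minimal end-to-end sketch, using only functions added or touched by this change (error handling elided):

	/* mon dest pool init enables frag processing (dp_rx_mon_dest.c) */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);

	/* replenish/attach prep dispatches on the flag (dp_rx.c) */
	if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
		dp_rx_desc_frag_prep(&desc_list->rx_desc, &nf_info);
	else
		dp_rx_desc_prep(&desc_list->rx_desc, &nf_info);

	/* buffer free picks the matching path (dp_rx_mon_dest.c) */
	if (rx_desc_pool->rx_mon_dest_frag_enable)
		dp_rx_desc_frag_free(soc, rx_desc_pool);
	else
		dp_rx_desc_nbuf_free(soc, rx_desc_pool);

	/* pool deinit resets the flag (dp_rx_desc.c) */
	rx_desc_pool->rx_mon_dest_frag_enable = false;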