
qcacmn: Restrict DMA Map/UnMap up to buffer size

Restrict DMA map/unmap to the buffer size for packets in the rx process.
This gives a 2-3% CPU gain at peak throughput.

Change-Id: Iaf5e9f6f734d80b6d2c234bd8e679cf2a81c7e2c
CRs-Fixed: 2660698
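
For context, a minimal sketch of the map/unmap pattern this change applies across the rx path. The helper name dp_rx_map_nbuf_sketch and the #include lines are illustrative assumptions, not part of the change; the qdf_nbuf_map_nbytes_single()/qdf_nbuf_unmap_nbytes_single() calls and rx_desc_pool->buf_size are taken from the diff below. The key point is that the map and the matching unmap must use the same explicit length.

    /* Sketch only, not driver code: map/unmap restricted to the rx pool's buf_size */
    #include <qdf_nbuf.h>   /* qdf_nbuf_map_nbytes_single(), QDF_STATUS */
    #include "dp_types.h"   /* struct dp_soc, struct rx_desc_pool (assumed headers) */

    static QDF_STATUS dp_rx_map_nbuf_sketch(struct dp_soc *soc,
                                            struct rx_desc_pool *rx_desc_pool,
                                            qdf_nbuf_t nbuf)
    {
            QDF_STATUS ret;

            /* Map only buf_size bytes instead of the whole skb data area */
            ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
                                             QDF_DMA_FROM_DEVICE,
                                             rx_desc_pool->buf_size);
            if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
                    return ret;

            /* ... buffer is posted to hardware and later received ... */

            /* Unmap with the same length that was passed to the map call */
            qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
                                         QDF_DMA_FROM_DEVICE,
                                         rx_desc_pool->buf_size);
            return QDF_STATUS_SUCCESS;
    }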
Varsha Mishra, 5 years ago
Parent revision: c71df5eef4
3 changed files with 35 additions and 24 deletions
  1. dp/wifi3.0/dp_rx.c (+22, -16)
  2. dp/wifi3.0/dp_rx_defrag.c (+5, -1)
  3. dp/wifi3.0/dp_rx_mon_status.c (+8, -7)

dp/wifi3.0/dp_rx.c (+22, -16)

@@ -247,8 +247,9 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 			break;
 		}
 
-		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
-					  QDF_DMA_FROM_DEVICE);
+		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, rx_netbuf,
+						 QDF_DMA_FROM_DEVICE, buf_size);
+
 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
 			qdf_nbuf_free(rx_netbuf);
 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
@@ -1992,14 +1993,6 @@ more_data:
 
 		}
 
-		/*
-		 * move unmap after scattered msdu waiting break logic
-		 * in case double skb unmap happened.
-		 */
-		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
-				      QDF_DMA_FROM_DEVICE);
-		rx_desc->unmapped = 1;
-
 		core_id = smp_processor_id();
 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
 
@@ -2057,6 +2050,15 @@ more_data:
 
 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
 
+		/*
+		 * move unmap after scattered msdu waiting break logic
+		 * in case double skb unmap happened.
+		 */
+		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
+					     QDF_DMA_FROM_DEVICE,
+					     rx_desc_pool->buf_size);
+		rx_desc->unmapped = 1;
 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
 
 		/*
@@ -2500,8 +2502,10 @@ dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
 		return ret;
 	}
 
-	ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf,
-				  QDF_DMA_FROM_DEVICE);
+	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, *nbuf,
+					 QDF_DMA_FROM_DEVICE,
+					 rx_desc_pool->buf_size);
+
 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
 		qdf_nbuf_free(*nbuf);
 		dp_err("nbuf map failed");
@@ -2513,8 +2517,9 @@ dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
 
 	ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool);
 	if (ret == QDF_STATUS_E_FAILURE) {
-		qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf,
-				      QDF_DMA_FROM_DEVICE);
+		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, *nbuf,
+					     QDF_DMA_FROM_DEVICE,
+					     rx_desc_pool->buf_size);
 		qdf_nbuf_free(*nbuf);
 		dp_err("nbuf check x86 failed");
 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
@@ -2751,8 +2756,9 @@ dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
 
 		memset(buf, 0, RX_DATA_BUFFER_SIZE);
 
-		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
-				    QDF_DMA_FROM_DEVICE);
+		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
+						 QDF_DMA_FROM_DEVICE,
+						 RX_DATA_BUFFER_SIZE);
 
 		/* nbuf map failed */
 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {

dp/wifi3.0/dp_rx_defrag.c (+5, -1)

@@ -1703,6 +1703,7 @@ uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
 	uint32_t rx_bfs = 0;
 	struct dp_pdev *pdev;
 	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct rx_desc_pool *rx_desc_pool;
 
 	qdf_assert(soc);
 	qdf_assert(mpdu_desc_info);
@@ -1730,7 +1731,10 @@ uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
 
 	msdu = rx_desc->nbuf;
 
-	qdf_nbuf_unmap_single(soc->osdev, msdu,	QDF_DMA_FROM_DEVICE);
+	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
+	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
+				     QDF_DMA_FROM_DEVICE,
+				     rx_desc_pool->buf_size);
 	rx_desc->unmapped = 1;
 
 	rx_desc->rx_buf_start = qdf_nbuf_data(msdu);

dp/wifi3.0/dp_rx_mon_status.c (+8, -7)

@@ -1689,7 +1689,9 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
 		uint8_t *status_buf;
 		qdf_dma_addr_t paddr;
 		uint64_t buf_addr;
+		struct rx_desc_pool *rx_desc_pool;
 
+		rx_desc_pool = &soc->rx_desc_status[mac_id];
 		buf_addr =
 			(HAL_RX_BUFFER_ADDR_31_0_GET(
 				rxdma_mon_status_ring_entry) |
@@ -1741,8 +1743,9 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
 			}
 			qdf_nbuf_set_pktlen(status_nbuf, RX_DATA_BUFFER_SIZE);
 
-			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
-				QDF_DMA_FROM_DEVICE);
+			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
+						     QDF_DMA_FROM_DEVICE,
+						     rx_desc_pool->buf_size);
 
 			/* Put the status_nbuf to queue */
 			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);
@@ -1750,11 +1753,8 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
 		} else {
 			union dp_rx_desc_list_elem_t *desc_list = NULL;
 			union dp_rx_desc_list_elem_t *tail = NULL;
-			struct rx_desc_pool *rx_desc_pool;
 			uint32_t num_alloc_desc;
 
-			rx_desc_pool = &soc->rx_desc_status[mac_id];
-
 			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
 							rx_desc_pool,
 							1,
@@ -2005,8 +2005,9 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 					"[%s][%d] rxdma_ring_entry is NULL, count - %d",
 					__func__, __LINE__, count);
-			qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
-					      QDF_DMA_FROM_DEVICE);
+			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
+						     QDF_DMA_FROM_DEVICE,
+						     rx_desc_pool->buf_size);
 			qdf_nbuf_free(rx_netbuf);
 			break;
 		}