
qcacmn: Optimize refill buffer pool feature code

Optimize the refill buffer pool feature code to be
lightweight on the CPU, so that the refill thread
consumes fewer CPU resources and RX buffers are
replenished faster.

Change-Id: I849ca60558c6197003d95e7d54fdb5f837cdc217
CRs-Fixed: 2950166
Karthik Kantamneni committed 4 years ago
commit 0a7f7fb6a8
4 files changed, 81 insertions(+), 105 deletions(-)
  1. dp/wifi3.0/dp_rx.c              +0  -4
  2. dp/wifi3.0/dp_rx_buffer_pool.c  +54 -61
  3. dp/wifi3.0/dp_rx_buffer_pool.h  +16 -34
  4. dp/wifi3.0/dp_types.h           +11  -6
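
The core of the change: the refill pool's spinlock-protected nbuf list is replaced by a fixed-size circular array indexed by head and tail, where the refill thread is the only writer of head and the RX replenish path is the only writer of tail, so no lock is needed. Below is a minimal, self-contained sketch of that single-producer/single-consumer ring; the names (refill_ring, ring_enqueue, ring_dequeue) are illustrative rather than the driver's, and the real code also allocates and DMA-maps nbufs through the qdf layer. One slot is deliberately kept empty so that head == tail can only mean an empty ring, and the pool size must stay a power of two for the index mask to work.

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 2048  /* mirrors DP_RX_REFILL_BUFF_POOL_SIZE; must be a power of two */

struct refill_ring {            /* illustrative stand-in for struct rx_refill_buff_pool */
	uint16_t head;          /* producer index, written only by the refill thread */
	uint16_t tail;          /* consumer index, written only by the replenish path */
	void *elem[POOL_SIZE];
};

/* Free slots available to the producer; one slot is always kept empty
 * so that head == tail unambiguously means "ring empty". */
static uint16_t ring_free_slots(const struct refill_ring *r)
{
	uint16_t head = r->head, tail = r->tail;

	if (tail > head)
		return tail - head - 1;
	return POOL_SIZE - head + tail - 1;
}

/* Producer: store one buffer and advance head with a power-of-two mask. */
static int ring_enqueue(struct refill_ring *r, void *buf)
{
	if (!ring_free_slots(r))
		return -1;

	r->elem[r->head] = buf;
	r->head = (r->head + 1) & (POOL_SIZE - 1);
	return 0;
}

/* Consumer: take one buffer, or NULL when the ring is empty. */
static void *ring_dequeue(struct refill_ring *r)
{
	void *buf;

	if (r->head == r->tail)
		return NULL;

	buf = r->elem[r->tail];
	r->tail = (r->tail + 1) & (POOL_SIZE - 1);
	return buf;
}

int main(void)
{
	static struct refill_ring r;   /* zero-initialized: head == tail == 0 */
	int dummy;

	ring_enqueue(&r, &dummy);
	printf("free slots after one enqueue: %u\n", (unsigned)ring_free_slots(&r));
	printf("dequeued the same buffer: %d\n", ring_dequeue(&r) == (void *)&dummy);
	return 0;
}

This single-writer discipline is what lets the commit delete bufq_lock and the dp_rx_refill_buff_pool_lock()/unlock() calls from the replenish fast path.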

+ 0 - 4
dp/wifi3.0/dp_rx.c

@@ -401,8 +401,6 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 
 	count = 0;
 
-	dp_rx_refill_buff_pool_lock(dp_soc);
-
 	while (count < num_req_buffers) {
 		/* Flag is set while pdev rx_desc_pool initialization */
 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
@@ -459,8 +457,6 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 
 	}
 
-	dp_rx_refill_buff_pool_unlock(dp_soc);
-
 	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
 				       num_req_buffers, count);
 

+ 54 - 61
dp/wifi3.0/dp_rx_buffer_pool.c

@@ -23,14 +23,6 @@
 #define DP_RX_BUFFER_POOL_SIZE 128
 #endif
 
-#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
-#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
-#endif
-
-#ifndef DP_RX_REFILL_BUFF_POOL_BURST
-#define DP_RX_REFILL_BUFF_POOL_BURST 64
-#endif
-
 #ifndef DP_RX_BUFF_POOL_ALLOC_THRES
 #define DP_RX_BUFF_POOL_ALLOC_THRES 1
 #endif
@@ -123,43 +115,48 @@ void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
 {
 	struct rx_desc_pool *rx_desc_pool;
 	struct rx_refill_buff_pool *buff_pool;
-	struct dp_pdev *dp_pdev;
+	qdf_device_t dev;
 	qdf_nbuf_t nbuf;
 	QDF_STATUS ret;
 	int count, i;
-	qdf_nbuf_t nbuf_head;
-	qdf_nbuf_t nbuf_tail;
-	uint32_t num_req_refill;
+	uint16_t num_refill;
+	uint16_t total_num_refill;
+	uint16_t total_count = 0;
+	uint16_t head, tail;
 
 	if (!soc)
 		return;
 
+	dev = soc->osdev;
 	buff_pool = &soc->rx_refill_buff_pool;
+	rx_desc_pool = &soc->rx_desc_buf[0];
 	if (!buff_pool->is_initialized)
 		return;
 
-	rx_desc_pool = &soc->rx_desc_buf[0];
-	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
-
-	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;
+	head = buff_pool->head;
+	tail = buff_pool->tail;
+	if (tail > head)
+		total_num_refill = (tail - head - 1);
+	else
+		total_num_refill = (DP_RX_REFILL_BUFF_POOL_SIZE - head +
+				    tail - 1);
 
-	while (num_req_refill) {
-		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
-			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;
+	while (total_num_refill) {
+		if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
+			num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
+		else
+			num_refill = total_num_refill;
 
 		count = 0;
-		nbuf_head = NULL;
-		nbuf_tail = NULL;
-		for (i = 0; i < num_req_refill; i++) {
-			nbuf = qdf_nbuf_alloc(soc->osdev,
-					      rx_desc_pool->buf_size,
+		for (i = 0; i < num_refill; i++) {
+			nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
 					      RX_BUFFER_RESERVATION,
 					      rx_desc_pool->buf_alignment,
 					      FALSE);
-			if (!nbuf)
+			if (qdf_unlikely(!nbuf))
 				continue;
 
-			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
+			ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
 							 QDF_DMA_FROM_DEVICE,
 							 rx_desc_pool->buf_size);
 			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
@@ -167,39 +164,38 @@ void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
 				continue;
 			}
 
-			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
+			buff_pool->buf_elem[head++] = nbuf;
+			head &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
 			count++;
 		}
-		if (count) {
-			qdf_spin_lock_bh(&buff_pool->bufq_lock);
-			DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
-					     buff_pool->buf_tail,
-					     nbuf_head, nbuf_tail);
-			buff_pool->bufq_len += count;
-
-			num_req_refill = buff_pool->max_bufq_len -
-				buff_pool->bufq_len;
-			qdf_spin_unlock_bh(&buff_pool->bufq_lock);
 
-			DP_STATS_INC(dp_pdev,
-				     rx_refill_buff_pool.num_bufs_refilled,
-				     count);
+		if (count) {
+			buff_pool->head = head;
+			total_num_refill -= count;
+			total_count += count;
 		}
 	}
+
+	DP_STATS_INC(buff_pool->dp_pdev,
+		     rx_refill_buff_pool.num_bufs_refilled,
+		     total_count);
 }
 
 static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
 {
-	qdf_nbuf_t nbuf = NULL;
 	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
+	qdf_nbuf_t nbuf = NULL;
+	uint16_t head, tail;
 
-	if (!buff_pool->in_rx_refill_lock || !buff_pool->bufq_len)
-		return nbuf;
+	head = buff_pool->head;
+	tail = buff_pool->tail;
 
-	nbuf = buff_pool->buf_head;
-	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
-	qdf_nbuf_set_next(nbuf, NULL);
-	buff_pool->bufq_len--;
+	if (head == tail)
+		return NULL;
+
+	nbuf = buff_pool->buf_elem[tail++];
+	tail &= (DP_RX_REFILL_BUFF_POOL_SIZE - 1);
+	buff_pool->tail = tail;
 
 	return nbuf;
 }
@@ -276,6 +272,7 @@ static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
 	qdf_nbuf_t nbuf;
 	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
 	QDF_STATUS ret;
+	uint16_t head = 0;
 	int i;
 
 	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
@@ -284,13 +281,11 @@ static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
 		return;
 	}
 
-	buff_pool->bufq_len = 0;
-	buff_pool->buf_head = NULL;
-	buff_pool->buf_tail = NULL;
 	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
-	qdf_spinlock_create(&buff_pool->bufq_lock);
+	buff_pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
+	buff_pool->tail = 0;
 
-	for (i = 0; i < buff_pool->max_bufq_len; i++) {
+	for (i = 0; i < (buff_pool->max_bufq_len - 1); i++) {
 		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
 				      RX_BUFFER_RESERVATION,
 				      rx_desc_pool->buf_alignment, FALSE);
@@ -305,14 +300,15 @@ static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
 			continue;
 		}
 
-		DP_RX_LIST_APPEND(buff_pool->buf_head,
-				  buff_pool->buf_tail, nbuf);
-		buff_pool->bufq_len++;
+		buff_pool->buf_elem[head] = nbuf;
+		head++;
 	}
 
+	buff_pool->head =  head;
+
 	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
 		buff_pool->max_bufq_len,
-		buff_pool->bufq_len);
+		buff_pool->head);
 
 	buff_pool->is_initialized = true;
 }
@@ -364,7 +360,6 @@ static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
 	if (!buff_pool->is_initialized)
 		return;
 
-	buff_pool->in_rx_refill_lock  = true;
 	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
 		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
 					     QDF_DMA_BIDIRECTIONAL,
@@ -372,12 +367,10 @@ static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
 		qdf_nbuf_free(nbuf);
 		count++;
 	}
-	buff_pool->in_rx_refill_lock  = false;
 
-	dp_info("Rx refill buffers freed during deinit %u qlen: %u",
-		count, buff_pool->bufq_len);
+	dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
+		count, buff_pool->head, buff_pool->tail);
 
-	qdf_spinlock_destroy(&buff_pool->bufq_lock);
 	buff_pool->is_initialized = false;
 }
 

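For context, a rough sketch of the burst-limited loop that dp_rx_refill_buff_pool_enqueue() now runs on the refill thread. Here alloc_rx_buf() is a stand-in for qdf_nbuf_alloc() plus the DMA map step, and the ring layout is the simplified one from the sketch above; treat this as an outline of the control flow, not the driver code. Free space is computed once from head and tail, filled in bursts of at most DP_RX_REFILL_BUFF_POOL_BURST buffers, and head is published only after each completed burst.

#include <stdint.h>
#include <stdlib.h>

#define POOL_SIZE  2048   /* DP_RX_REFILL_BUFF_POOL_SIZE */
#define POOL_BURST 64     /* DP_RX_REFILL_BUFF_POOL_BURST */

struct ring {
	uint16_t head, tail;
	void *elem[POOL_SIZE];
};

/* Hypothetical allocator standing in for qdf_nbuf_alloc() + DMA mapping. */
static void *alloc_rx_buf(void)
{
	return malloc(2048);
}

static void refill_pool(struct ring *r)
{
	uint16_t head = r->head, tail = r->tail;
	uint16_t total, burst, count;
	int i;

	/* Free slots, keeping one empty so head == tail means "empty". */
	if (tail > head)
		total = tail - head - 1;
	else
		total = POOL_SIZE - head + tail - 1;

	while (total) {
		burst = (total > POOL_BURST) ? POOL_BURST : total;
		count = 0;

		for (i = 0; i < burst; i++) {
			void *buf = alloc_rx_buf();

			if (!buf)
				continue;       /* skip failed allocations */

			r->elem[head++] = buf;
			head &= (POOL_SIZE - 1);
			count++;
		}

		if (!count)
			break;                  /* allocator exhausted; give up for now */

		r->head = head;                 /* publish progress after each burst */
		total -= count;
	}
}

int main(void)
{
	static struct ring r;   /* demo only: buffers are intentionally not freed */

	refill_pool(&r);        /* fills up to POOL_SIZE - 1 slots */
	return 0;
}

Breaking out on a fully failed burst is a simplification added here to keep the sketch bounded; the driver, as shown in the hunk above, retries the burst instead and feeds the running count into the rx_refill_buff_pool.num_bufs_refilled statistic.
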
+ 16 - 34
dp/wifi3.0/dp_rx_buffer_pool.h

@@ -104,38 +104,24 @@ dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
  *
  */
 static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc)
-{
-	if (soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread)
-		soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(dp_soc_to_cdp_soc_t(soc));
-}
-
-/**
- * dp_rx_refill_buff_pool_lock() - Acquire Rx refill buff pool lock
- * @soc: SoC handle
- *
- */
-static inline void dp_rx_refill_buff_pool_lock(struct dp_soc *soc)
 {
 	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
-
-	if (buff_pool->is_initialized  &&
-	    qdf_spin_trylock_bh(&buff_pool->bufq_lock))
-		buff_pool->in_rx_refill_lock = true;
-}
-
-/**
- * dp_rx_refill_buff_pool_unlock() - Release Rx refill buff pool lock
- * @soc: SoC handle
- *
- */
-static inline void dp_rx_refill_buff_pool_unlock(struct dp_soc *soc)
-{
-	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
-
-	if (buff_pool->in_rx_refill_lock) {
-		qdf_spin_unlock_bh(&buff_pool->bufq_lock);
-		buff_pool->in_rx_refill_lock = false;
-	}
+	uint16_t head = buff_pool->head;
+	uint16_t tail = buff_pool->tail;
+	uint16_t num_refill;
+
+	if (!buff_pool->is_initialized)
+		return;
+
+	if (tail > head)
+		num_refill = (tail - head - 1);
+	else
+		num_refill = (DP_RX_REFILL_BUFF_POOL_SIZE - head + tail - 1);
+
+	if (soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread &&
+	    num_refill >= DP_RX_REFILL_THRD_THRESHOLD)
+		soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(
+						dp_soc_to_cdp_soc_t(soc));
 }
 #else
 /**
@@ -235,9 +221,5 @@ dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
 
 static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }
 
-static inline void dp_rx_refill_buff_pool_lock(struct dp_soc *soc) { }
-
-static inline void dp_rx_refill_buff_pool_unlock(struct dp_soc *soc) { }
-
 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
 #endif /* _DP_RX_BUFFER_POOL_H_ */
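
The header-side change gates the wakeup itself: dp_rx_schedule_refill_thread() now computes the free slots inline and invokes the dp_rx_sched_refill_thread callback only once at least DP_RX_REFILL_THRD_THRESHOLD slots are empty. A small sketch of just that check, with illustrative names and the same head/tail convention as the sketches above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE      2048   /* DP_RX_REFILL_BUFF_POOL_SIZE */
#define WAKE_THRESHOLD 512    /* DP_RX_REFILL_THRD_THRESHOLD */

/* Wake the refill thread only when the pool has drained enough that a
 * burst refill is worthwhile, instead of on every replenish call. */
static bool should_wake_refill_thread(uint16_t head, uint16_t tail)
{
	uint16_t free_slots;

	if (tail > head)
		free_slots = tail - head - 1;
	else
		free_slots = POOL_SIZE - head + tail - 1;

	return free_slots >= WAKE_THRESHOLD;
}

int main(void)
{
	/* Pool still full of preallocated buffers: no free slots, no wakeup. */
	printf("%d\n", should_wake_refill_thread(10, 11));
	/* Pool drained by the replenish path: plenty of room, wake the thread. */
	printf("%d\n", should_wake_refill_thread(100, 90));
	return 0;
}

This keeps frequent replenish calls from generating a thread wakeup per packet while the pool is still mostly full.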

+ 11 - 6
dp/wifi3.0/dp_types.h

@@ -134,6 +134,12 @@
 #define FISA_FLOW_MAX_AGGR_COUNT        16 /* max flow aggregate count */
 #endif
 
+#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
+#define DP_RX_REFILL_BUFF_POOL_SIZE  2048
+#define DP_RX_REFILL_BUFF_POOL_BURST 64
+#define DP_RX_REFILL_THRD_THRESHOLD  512
+#endif
+
 enum rx_pktlog_mode {
 	DP_RX_PKTLOG_DISABLED = 0,
 	DP_RX_PKTLOG_FULL,
@@ -1178,13 +1184,12 @@ struct rx_buff_pool {
 };
 
 struct rx_refill_buff_pool {
-	qdf_nbuf_t buf_head;
-	qdf_nbuf_t buf_tail;
-	qdf_spinlock_t bufq_lock;
-	uint32_t bufq_len;
-	uint32_t max_bufq_len;
-	bool in_rx_refill_lock;
 	bool is_initialized;
+	uint16_t head;
+	uint16_t tail;
+	struct dp_pdev *dp_pdev;
+	uint16_t max_bufq_len;
+	qdf_nbuf_t buf_elem[2048];
 };
 
 #ifdef DP_TX_HW_DESC_HISTORY