
qcacmn: Avoid qdf framework for nbuf free in Tx simple path

Avoid the qdf framework for nbuf_free in the Tx simple path.

Change-Id: Ic0a82279586a16f07625fc78d3e07d1a134af3a4
CRs-Fixed: 3224285
Author: Neha Bisht
Commit: 87596da3e4

5 changed files with 88 additions and 53 deletions
  1. dp/wifi3.0/dp_rx.h (+3 -1)
  2. dp/wifi3.0/dp_tx.c (+1 -1)
  3. qdf/inc/qdf_nbuf.h (+26 -8)
  4. qdf/linux/src/i_qdf_nbuf.h (+2 -1)
  5. qdf/linux/src/qdf_nbuf.c (+56 -42)
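
Before the per-file diffs, a minimal caller-side sketch of the two wrappers touched by this change. This is not part of the commit; the function and constant names below are illustrative only. It shows that qdf_nbuf_alloc_simple() now takes the same reserve/align/prio arguments as qdf_nbuf_alloc(), and that qdf_nbuf_free_simple() is the fast-path free used in Tx completion.

/*
 * Illustrative caller-side sketch (not part of this commit). Names
 * prefixed with "example_"/"EXAMPLE_" are hypothetical.
 */
#include <qdf_nbuf.h>

#define EXAMPLE_RX_BUF_SIZE     2048  /* real value comes from rx_desc_pool */
#define EXAMPLE_RX_RESERVATION  0
#define EXAMPLE_RX_ALIGNMENT    0

static qdf_nbuf_t example_rx_buf_alloc(qdf_device_t osdev)
{
	/* qdf_nbuf_alloc_simple() now takes reserve/align/prio like
	 * qdf_nbuf_alloc(); see the dp_rx.h hunk below. */
	return qdf_nbuf_alloc_simple(osdev, EXAMPLE_RX_BUF_SIZE,
				     EXAMPLE_RX_RESERVATION,
				     EXAMPLE_RX_ALIGNMENT, FALSE);
}

static void example_tx_comp_free(qdf_nbuf_t nbuf)
{
	/* Fast-path free used in Tx completion; when nbuf memory debug is
	 * not active this frees the skb directly instead of going through
	 * the qdf debug bookkeeping (see qdf_nbuf_free_debug_simple below). */
	qdf_nbuf_free_simple(nbuf);
}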

+ 3 - 1
dp/wifi3.0/dp_rx.h

@@ -2368,7 +2368,9 @@ static inline
 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
 			    struct rx_desc_pool *rx_desc_pool)
 {
-	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size);
+	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
+				     RX_BUFFER_RESERVATION,
+				     rx_desc_pool->buf_alignment, FALSE);
 }
 
 static inline

+ 1 - 1
dp/wifi3.0/dp_tx.c

@@ -4975,7 +4975,7 @@ dp_tx_comp_process_desc_list(struct dp_soc *soc,
 			dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
 					       desc->id, DP_TX_COMP_UNMAP);
 			dp_tx_nbuf_unmap(soc, desc);
-			qdf_nbuf_free(desc->nbuf);
+			qdf_nbuf_free_simple(desc->nbuf);
 			dp_tx_desc_free(soc, desc, desc->pool_id);
 			desc = next;
 			continue;

+ 26 - 8
qdf/inc/qdf_nbuf.h

@@ -1938,9 +1938,22 @@ static inline qdf_nbuf_t qdf_nbuf_next(qdf_nbuf_t buf)
 
 #define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
 
+enum qdf_nbuf_event_type {
+	QDF_NBUF_ALLOC,
+	QDF_NBUF_ALLOC_CLONE,
+	QDF_NBUF_ALLOC_COPY,
+	QDF_NBUF_ALLOC_FAILURE,
+	QDF_NBUF_FREE,
+	QDF_NBUF_MAP,
+	QDF_NBUF_UNMAP,
+	QDF_NBUF_ALLOC_COPY_EXPAND,
+};
+
 void qdf_net_buf_debug_init(void);
 void qdf_net_buf_debug_exit(void);
 void qdf_net_buf_debug_clean(void);
+void qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
+			  enum qdf_nbuf_event_type type);
 void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
 				const char *func_name, uint32_t line_num);
 /**
@@ -1997,8 +2010,8 @@ void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf);
 
 /* nbuf allocation rouines */
 
-#define qdf_nbuf_alloc_simple(d, s) \
-	__qdf_nbuf_alloc_simple(d, s)
+#define qdf_nbuf_alloc_simple(d, s, r, a, p) \
+	__qdf_nbuf_alloc_simple(d, s, __func__, __LINE__)
 
 #define qdf_nbuf_alloc(d, s, r, a, p) \
 	qdf_nbuf_alloc_debug(d, s, r, a, p, __func__, __LINE__)
@@ -2025,15 +2038,17 @@ qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
 
 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
 					    const char *func, uint32_t line);
-
-#define qdf_nbuf_free_simple(d) \
-	__qdf_nbuf_free(d)
-
 #define qdf_nbuf_free(d) \
 	qdf_nbuf_free_debug(d, __func__, __LINE__)
 
 void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line);
 
+#define qdf_nbuf_free_simple(d) \
+	qdf_nbuf_free_debug_simple(d, __func__, __LINE__)
+
+void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
+				uint32_t line);
+
 #define qdf_nbuf_clone(buf)     \
 	qdf_nbuf_clone_debug(buf, __func__, __LINE__)
 
@@ -2136,8 +2151,9 @@ qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
 }
 /* Nbuf allocation rouines */
 
-#define qdf_nbuf_alloc_simple(d, s) \
-	__qdf_nbuf_alloc_simple(d, s)
+#define qdf_nbuf_alloc_simple(osdev, size, reserve, align, prio) \
+	qdf_nbuf_alloc_fl(osdev, size, reserve, align, prio, \
+			  __func__, __LINE__)
 
 #define qdf_nbuf_alloc(osdev, size, reserve, align, prio) \
 	qdf_nbuf_alloc_fl(osdev, size, reserve, align, prio, \
@@ -2173,6 +2189,8 @@ qdf_nbuf_alloc_no_recycler_fl(size_t size, int reserve, int align,
 	return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
 }
 
+#define qdf_nbuf_free_simple(d) qdf_nbuf_free(d)
+
 static inline void qdf_nbuf_free(qdf_nbuf_t buf)
 {
 	if (qdf_likely(buf))

+ 2 - 1
qdf/linux/src/i_qdf_nbuf.h

@@ -791,7 +791,8 @@ __qdf_nbuf_t
 __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
 		 int prio, const char *func, uint32_t line);
 
-__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size);
+__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
+				     const char *func, uint32_t line);
 
 /**
  * __qdf_nbuf_alloc_no_recycler() - Allocates skb

+ 56 - 42
qdf/linux/src/qdf_nbuf.c

@@ -601,36 +601,6 @@ skb_alloc:
 }
 #else
 
-struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size)
-{
-	struct sk_buff *skb;
-	int flags = GFP_KERNEL;
-
-	if (in_interrupt() || irqs_disabled() || in_atomic()) {
-		flags = GFP_ATOMIC;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
-		/*
-		 * Observed that kcompactd burns out CPU to make order-3 page.
-		 *__netdev_alloc_skb has 4k page fallback option just in case of
-		 * failing high order page allocation so we don't need to be
-		 * hard. Make kcompactd rest in piece.
-		 */
-		flags = flags & ~__GFP_KSWAPD_RECLAIM;
-#endif
-	}
-
-	skb = __netdev_alloc_skb(NULL, size, flags);
-
-	if (skb)
-		qdf_nbuf_count_inc(skb);
-	else
-		return NULL;
-
-	return skb;
-}
-
-qdf_export_symbol(__qdf_nbuf_alloc_simple);
-
 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
 				 int align, int prio, const char *func,
 				 uint32_t line)
@@ -773,17 +743,6 @@ __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
 qdf_export_symbol(__qdf_nbuf_clone);
 
 #ifdef NBUF_MEMORY_DEBUG
-enum qdf_nbuf_event_type {
-	QDF_NBUF_ALLOC,
-	QDF_NBUF_ALLOC_CLONE,
-	QDF_NBUF_ALLOC_COPY,
-	QDF_NBUF_ALLOC_FAILURE,
-	QDF_NBUF_FREE,
-	QDF_NBUF_MAP,
-	QDF_NBUF_UNMAP,
-	QDF_NBUF_ALLOC_COPY_EXPAND,
-};
-
 struct qdf_nbuf_event {
 	qdf_nbuf_t nbuf;
 	char func[QDF_MEM_FUNC_NAME_SIZE];
@@ -809,7 +768,7 @@ static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
 	return next % size;
 }
 
-static void
+void
 qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
 		     enum qdf_nbuf_event_type type)
 {
@@ -3276,6 +3235,61 @@ free_buf:
 }
 qdf_export_symbol(qdf_nbuf_free_debug);
 
+struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
+					const char *func, uint32_t line)
+{
+	struct sk_buff *skb;
+	int flags = GFP_KERNEL;
+
+	if (in_interrupt() || irqs_disabled() || in_atomic()) {
+		flags = GFP_ATOMIC;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+		/*
+		 * Observed that kcompactd burns out CPU to make order-3 page.
+		 *__netdev_alloc_skb has 4k page fallback option just in case of
+		 * failing high order page allocation so we don't need to be
+		 * hard. Make kcompactd rest in piece.
+		 */
+		flags = flags & ~__GFP_KSWAPD_RECLAIM;
+#endif
+	}
+
+	skb = __netdev_alloc_skb(NULL, size, flags);
+
+
+	if (qdf_likely(is_initial_mem_debug_disabled)) {
+		if (qdf_likely(skb))
+			qdf_nbuf_count_inc(skb);
+	} else {
+		if (qdf_likely(skb)) {
+			qdf_nbuf_count_inc(skb);
+			qdf_net_buf_debug_add_node(skb, size, func, line);
+			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
+		} else {
+			qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
+		}
+	}
+
+
+	return skb;
+}
+
+qdf_export_symbol(__qdf_nbuf_alloc_simple);
+
+void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
+				uint32_t line)
+{
+	if (qdf_likely(nbuf)) {
+		if (is_initial_mem_debug_disabled) {
+			dev_kfree_skb_any(nbuf);
+		} else {
+			qdf_nbuf_free_debug(nbuf, func, line);
+		}
+	}
+}
+
+qdf_export_symbol(qdf_nbuf_free_debug_simple);
+
 qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
 {
 	uint32_t num_nr_frags;