
qcacmn: Add new API for custom page frag cache nbuf allocations

SKBs allocated via __netdev_alloc_skb() share a single page frag
cache that is maintained in the network layer and is common to all
network drivers/devices. Memory can become fragmented when the WLAN
driver shares this page frag cache with other, slower network
devices. Over time, a lot of memory is wasted to fragmentation,
eventually resulting in SKB allocation failures due to OOM.

To circumvent this, define a QDF NBUF allocation API that accepts a
custom page frag cache maintained in the driver as input and
allocates the memory for skb->head from that cache.

Such an API is particularly useful for allocating reusable SKBs in
the driver, and it avoids the aforementioned memory fragmentation
issue.

Change-Id: I33f3096bba4057fd06ef55bbed5dc7a3f0f5c759
CRs-Fixed: 3543424
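
As a hedged illustration of the new API (the context structure, buffer
sizes, and alignment below are hypothetical; only qdf_frag_cache_t and
qdf_nbuf_page_frag_alloc come from this change), a driver holding its
own page frag cache for reusable buffers might call it roughly like
this:

/* Hedged usage sketch: a driver-owned page frag cache for reusable
 * SKBs. "my_wlan_ctx" and the sizes are illustrative assumptions.
 */
struct my_wlan_ctx {
	qdf_frag_cache_t pf_cache; /* private cache, not the one shared by all netdevs */
};

static qdf_nbuf_t my_alloc_reusable_nbuf(qdf_device_t osdev,
					 struct my_wlan_ctx *ctx)
{
	/* 2048-byte buffer, 64 bytes of headroom, 4-byte alignment;
	 * skb->head is carved out of ctx->pf_cache, falling back to
	 * the regular allocator if the frag cache is exhausted.
	 */
	return qdf_nbuf_page_frag_alloc(osdev, 2048, 64, 4,
					&ctx->pf_cache);
}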
Manikanta Pubbisetty 2 years ago
parent
commit
d621993b84
3 changed files with 168 additions and 79 deletions
  1. qdf/inc/qdf_nbuf.h          +21 -0
  2. qdf/linux/src/i_qdf_nbuf.h  +26 -0
  3. qdf/linux/src/qdf_nbuf.c    +121 -79

+ 21 - 0
qdf/inc/qdf_nbuf.h

@@ -2414,6 +2414,14 @@ void
 qdf_nbuf_dev_kfree_list_debug(qdf_nbuf_queue_head_t *nbuf_queue_head,
 			      const char *func_name,
 			      uint32_t line_num);
+
+#define qdf_nbuf_page_frag_alloc(d, s, r, a, p) \
+	qdf_nbuf_page_frag_alloc_debug(d, s, r, a, p, __func__, __LINE__)
+
+qdf_nbuf_t
+qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
+			       int align, qdf_frag_cache_t *pf_cache,
+			       const char *func, uint32_t line);
 #else /* NBUF_MEMORY_DEBUG */
 
 static inline void qdf_net_buf_debug_init(void) {}
@@ -2569,6 +2577,19 @@ qdf_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
 {
 	__qdf_nbuf_dev_kfree_list(nbuf_queue_head);
 }
+
+#define qdf_nbuf_page_frag_alloc(osdev, size, reserve, align, pf_cache) \
+	qdf_nbuf_page_frag_alloc_fl(osdev, size, reserve, align, pf_cache, \
+			  __func__, __LINE__)
+
+static inline qdf_nbuf_t
+qdf_nbuf_page_frag_alloc_fl(qdf_device_t osdev, qdf_size_t size, int reserve,
+			    int align, qdf_frag_cache_t *pf_cache,
+			    const char *func, uint32_t line)
+{
+	return __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align, pf_cache,
+					  func, line);
+}
 #endif /* NBUF_MEMORY_DEBUG */
 
 #if defined(QCA_DP_NBUF_FAST_PPEDS)

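For reference, a minimal sketch of how the wrapper above resolves at a
hypothetical call site (the variables are illustrative): with
NBUF_MEMORY_DEBUG defined the macro routes through the tracked _debug
variant, otherwise through the thin inline _fl wrapper, and both
receive the caller's __func__ and __LINE__:

/* Caller writes: */
nbuf = qdf_nbuf_page_frag_alloc(osdev, size, reserve, align, &pf_cache);

/* With NBUF_MEMORY_DEBUG, this expands to the tracked variant: */
nbuf = qdf_nbuf_page_frag_alloc_debug(osdev, size, reserve, align,
				      &pf_cache, __func__, __LINE__);

/* Without it, to the inline wrapper that forwards to the platform layer: */
nbuf = qdf_nbuf_page_frag_alloc_fl(osdev, size, reserve, align,
				   &pf_cache, __func__, __LINE__);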
+ 26 - 0
qdf/linux/src/i_qdf_nbuf.h

@@ -383,6 +383,32 @@ __qdf_nbuf_frag_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
 					  const char *func, uint32_t line);
 
+/**
+ * __qdf_nbuf_page_frag_alloc() - Allocate nbuf from @pf_cache page
+ *				  fragment cache
+ * @osdev: Device handle
+ * @size: Netbuf requested size
+ * @reserve: headroom to start with
+ * @align: Alignment of the data pointer
+ * @pf_cache: Reference to page fragment cache
+ * @func: Function name of the call site
+ * @line: line number of the call site
+ *
+ * This allocates an nbuf, aligns it if needed, and reserves some space at
+ * the front. Since the reserve is done after alignment, an unaligned
+ * reserve value will result in an unaligned data address.
+ *
+ * It calls the kernel page fragment APIs to allocate skb->head. Prefer this
+ * API for buffers that are allocated and freed only once, i.e. for reusable
+ * buffers.
+ *
+ * Return: nbuf or %NULL if no memory
+ */
+__qdf_nbuf_t
+__qdf_nbuf_page_frag_alloc(__qdf_device_t osdev, size_t size, int reserve,
+			   int align, __qdf_frag_cache_t *pf_cache,
+			   const char *func, uint32_t line);
+
 /**
  * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
  * @nbuf: Pointer to network buffer

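The implementation in qdf_nbuf.c below builds on the kernel's standard
pairing of page_frag_alloc() with build_skb(). A minimal sketch of that
underlying pattern, outside QDF and with the fallback paths omitted
(buf_len and the GFP choice here are illustrative assumptions):

#include <linux/skbuff.h>
#include <linux/gfp.h>

static struct sk_buff *frag_cache_skb(struct page_frag_cache *nc,
				      unsigned int buf_len)
{
	struct sk_buff *skb;
	unsigned int size;
	void *data;

	/* Size the fragment for headroom plus the skb_shared_info that
	 * build_skb() places at the end of the buffer.
	 */
	size = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = page_frag_alloc(nc, size, GFP_ATOMIC);
	if (!data)
		return NULL;

	skb = build_skb(data, size);
	if (!skb) {
		page_frag_free(data); /* give the fragment back */
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);
	return skb;
}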
+ 121 - 79
qdf/linux/src/qdf_nbuf.c

@@ -479,6 +479,40 @@ qdf_export_symbol(qdf_nbuf_frag_count_dec);
 
 #endif
 
+static inline void
+qdf_nbuf_set_defaults(struct sk_buff *skb, int align, int reserve)
+{
+	unsigned long offset;
+
+	memset(skb->cb, 0x0, sizeof(skb->cb));
+	skb->dev = NULL;
+
+	/*
+	 * The default is for netbuf fragments to be interpreted
+	 * as wordstreams rather than bytestreams.
+	 */
+	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
+	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
+
+	/*
+	 * XXX:how about we reserve first then align
+	 * Align & make sure that the tail & data are adjusted properly
+	 */
+
+	if (align) {
+		offset = ((unsigned long)skb->data) % align;
+		if (offset)
+			skb_reserve(skb, align - offset);
+	}
+
+	/*
+	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
+	 * pointer
+	 */
+	skb_reserve(skb, reserve);
+	qdf_nbuf_count_inc(skb);
+}
+
 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
 	!defined(QCA_WIFI_QCN9000)
 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
@@ -486,7 +520,6 @@ struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
 				 uint32_t line)
 {
 	struct sk_buff *skb;
-	unsigned long offset;
 	uint32_t lowmem_alloc_tries = 0;
 
 	if (align)
@@ -524,32 +557,8 @@ skb_alloc:
 			goto realloc;
 		}
 	}
-	memset(skb->cb, 0x0, sizeof(skb->cb));
 
-	/*
-	 * The default is for netbuf fragments to be interpreted
-	 * as wordstreams rather than bytestreams.
-	 */
-	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
-	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
-
-	/*
-	 * XXX:how about we reserve first then align
-	 * Align & make sure that the tail & data are adjusted properly
-	 */
-
-	if (align) {
-		offset = ((unsigned long)skb->data) % align;
-		if (offset)
-			skb_reserve(skb, align - offset);
-	}
-
-	/*
-	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
-	 * pointer
-	 */
-	skb_reserve(skb, reserve);
-	qdf_nbuf_count_inc(skb);
+	qdf_nbuf_set_defaults(skb, align, reserve);
 
 	return skb;
 }
@@ -570,7 +579,6 @@ struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
 				 uint32_t line)
 {
 	struct sk_buff *skb;
-	unsigned long offset;
 	int flags = GFP_KERNEL;
 
 	if (align)
@@ -606,32 +614,7 @@ struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
 	__qdf_nbuf_stop_replenish_timer();
 
 skb_alloc:
-	memset(skb->cb, 0x0, sizeof(skb->cb));
-	skb->dev = NULL;
-	/*
-	 * The default is for netbuf fragments to be interpreted
-	 * as wordstreams rather than bytestreams.
-	 */
-	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
-	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
-
-	/*
-	 * XXX:how about we reserve first then align
-	 * Align & make sure that the tail & data are adjusted properly
-	 */
-
-	if (align) {
-		offset = ((unsigned long)skb->data) % align;
-		if (offset)
-			skb_reserve(skb, align - offset);
-	}
-
-	/*
-	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
-	 * pointer
-	 */
-	skb_reserve(skb, reserve);
-	qdf_nbuf_count_inc(skb);
+	qdf_nbuf_set_defaults(skb, align, reserve);
 
 	return skb;
 }
@@ -645,7 +628,6 @@ struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
 				      const char *func, uint32_t line)
 {
 	struct sk_buff *skb;
-	unsigned long offset;
 	int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
 
 	if (align)
@@ -681,31 +663,7 @@ struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
 	__qdf_nbuf_stop_replenish_timer();
 
 skb_alloc:
-	memset(skb->cb, 0x0, sizeof(skb->cb));
-	/*
-	 * The default is for netbuf fragments to be interpreted
-	 * as wordstreams rather than bytestreams.
-	 */
-	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
-	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
-
-	/*
-	 * XXX:how about we reserve first then align
-	 * Align & make sure that the tail & data are adjusted properly
-	 */
-
-	if (align) {
-		offset = ((unsigned long)skb->data) % align;
-		if (offset)
-			skb_reserve(skb, align - offset);
-	}
-
-	/*
-	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
-	 * pointer
-	 */
-	skb_reserve(skb, reserve);
-	qdf_nbuf_count_inc(skb);
+	qdf_nbuf_set_defaults(skb, align, reserve);
 
 	return skb;
 }
@@ -773,6 +731,63 @@ __qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
 
 qdf_export_symbol(__qdf_nbuf_clone);
 
+struct sk_buff *
+__qdf_nbuf_page_frag_alloc(qdf_device_t osdev, size_t size, int reserve,
+			   int align, __qdf_frag_cache_t *pf_cache,
+			   const char *func, uint32_t line)
+{
+	struct sk_buff *skb;
+	qdf_frag_t frag_data;
+	size_t orig_size = size;
+	int flags = GFP_KERNEL;
+
+	if (align)
+		size += (align - 1);
+
+	size += NET_SKB_PAD;
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size = SKB_DATA_ALIGN(size);
+
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	frag_data = page_frag_alloc(pf_cache, size, flags);
+	if (!frag_data) {
+		qdf_rl_nofl_err("page frag alloc failed %zuB @ %s:%d",
+				size, func, line);
+		return __qdf_nbuf_alloc(osdev, orig_size, reserve, align, 0,
+					func, line);
+	}
+
+	skb = build_skb(frag_data, size);
+	if (skb) {
+		skb_reserve(skb, NET_SKB_PAD);
+		goto skb_alloc;
+	}
+
+	/* Free the data allocated from pf_cache */
+	page_frag_free(frag_data);
+
+	size = orig_size + align - 1;
+
+	skb = pld_nbuf_pre_alloc(size);
+	if (!skb) {
+		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
+				size, func, line);
+		__qdf_nbuf_start_replenish_timer();
+		return NULL;
+	}
+
+	__qdf_nbuf_stop_replenish_timer();
+
+skb_alloc:
+	qdf_nbuf_set_defaults(skb, align, reserve);
+
+	return skb;
+}
+
+qdf_export_symbol(__qdf_nbuf_page_frag_alloc);
+
 #ifdef QCA_DP_TX_NBUF_LIST_FREE
 void
 __qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
@@ -3702,6 +3717,33 @@ qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
 }
 qdf_export_symbol(qdf_nbuf_clone_debug);
 
+qdf_nbuf_t
+qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
+			       int align, __qdf_frag_cache_t *pf_cache,
+			       const char *func, uint32_t line)
+{
+	qdf_nbuf_t nbuf;
+
+	if (is_initial_mem_debug_disabled)
+		return __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
+						  pf_cache, func, line);
+
+	nbuf = __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
+					  pf_cache, func, line);
+
+	/* Store SKB in internal QDF tracking table */
+	if (qdf_likely(nbuf)) {
+		qdf_net_buf_debug_add_node(nbuf, size, func, line);
+		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
+	} else {
+		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
+	}
+
+	return nbuf;
+}
+
+qdf_export_symbol(qdf_nbuf_page_frag_alloc_debug);
+
 qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
 {
 	qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);