
qcacmn: New Alloc API for DS

Requirement:
In the skb recycler, if the recycler module allocates buffers
already used by the DS module back to DS, the memzero and
shinfo reset can be skipped, since the DS packets were not
processed by SW (the host).
This gives better KPI with lower CPU utilization.

Fix:
Implemented a new qdf wrapper API, qdf_nbuf_alloc_ppe_ds,
in the wifi driver, which in turn invokes the
__netdev_alloc_skb_no_skb_reset API in the Linux module.
With this netdev alloc API, the shinfo reset is avoided
for DS-used buffers alone.
The skb recycler changes were already delivered via
4360331.
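
For illustration only (not part of this change), a DS Rx
buffer-replenish path could call the new wrapper as in the
hedged sketch below. The helper name dp_ppeds_rx_buf_alloc and
its parameters are assumptions; the reserve, align and priority
arguments are accepted purely for signature compatibility with
qdf_nbuf_alloc and are dropped by the macro.

	/* Hypothetical helper (names assumed, not from this change):
	 * allocate one Rx buffer for a PPE-DS ring without the usual
	 * skb reset.
	 */
	static inline qdf_nbuf_t dp_ppeds_rx_buf_alloc(qdf_device_t osdev,
						       uint32_t buf_size,
						       uint32_t align)
	{
		/* reserve/align/prio are forwarded only for signature
		 * compatibility with qdf_nbuf_alloc; the macro ignores
		 * them and passes just the device and size.
		 */
		return qdf_nbuf_alloc_ppe_ds(osdev, buf_size, 0, align, 0);
	}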

Change-Id: I3fe8dc07ca12c01136eaee4fcdb1ff6036f80f74
CRs-Fixed: 3350807
Nanda Krishnan 2 years ago
parent
commit
97f88fb874

+ 5 - 0
qdf/inc/qdf_nbuf.h

@@ -2410,6 +2410,11 @@ qdf_nbuf_dev_kfree_list(qdf_nbuf_queue_head_t *nbuf_queue_head)
 }
 #endif /* NBUF_MEMORY_DEBUG */
 
+#if defined(QCA_DP_NBUF_FAST_PPEDS)
+#define qdf_nbuf_alloc_ppe_ds(d, s, r, a, p) \
+	__qdf_nbuf_alloc_ppe_ds(d, s, __func__, __LINE__)
+#endif /* QCA_DP_NBUF_FAST_PPEDS */
+
 /**
  * qdf_nbuf_dev_queue_head() - Queue a buffer at the list head
  * @nbuf_queue_head: Pointer to buffer list head
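
As a reading aid (hypothetical call site, not from this change),
the macro keeps the five-argument shape of qdf_nbuf_alloc but
forwards only the device and size, adding the call site's
__func__/__LINE__ for the nbuf debug bookkeeping; the reserve,
align and priority arguments are ignored:

	/* Hypothetical call site: */
	nbuf = qdf_nbuf_alloc_ppe_ds(osdev, 2048, 0, 0, 0);
	/* after preprocessing this becomes, in effect: */
	nbuf = __qdf_nbuf_alloc_ppe_ds(osdev, 2048, __func__, __LINE__);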

+ 22 - 0
qdf/linux/src/i_qdf_nbuf.h

@@ -815,6 +815,28 @@ __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
 __qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size,
 				     const char *func, uint32_t line);
 
+#if defined(QCA_DP_NBUF_FAST_PPEDS)
+/**
+ * __qdf_nbuf_alloc_ppe_ds() - Allocates nbuf
+ * @osdev: Device handle
+ * @size: Netbuf requested size
+ * @func: Function name of the call site
+ * @line: line number of the call site
+ *
+ * This allocates an nbuf for the wifi module in DS mode
+ * using the __netdev_alloc_skb_no_skb_reset API.
+ * That netdev API invokes skb_recycler_alloc with reset_skb
+ * set to false, so the recycler pool does not do reset_struct
+ * when it allocates a DS-used buffer to the DS module, which
+ * helps to improve performance.
+ *
+ * Return: nbuf or %NULL if no memory
+ */
+
+__qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
+				     const char *func, uint32_t line);
+#endif /* QCA_DP_NBUF_FAST_PPEDS */
+
 /**
  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
  * @size: Size to be allocated for skb

+ 41 - 0
qdf/linux/src/qdf_nbuf.c

@@ -3928,6 +3928,47 @@ qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
 qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
 #endif /* NBUF_MEMORY_DEBUG */
 
+#if defined(QCA_DP_NBUF_FAST_PPEDS)
+struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
+					const char *func, uint32_t line)
+{
+	struct sk_buff *skb;
+	int flags = GFP_KERNEL;
+
+	if (in_interrupt() || irqs_disabled() || in_atomic()) {
+		flags = GFP_ATOMIC;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+		/*
+		 * Observed that kcompactd burns CPU trying to build an
+		 * order-3 page. __netdev_alloc_skb has a 4K page
+		 * fallback in case the high-order page allocation
+		 * fails, so we don't need to push that hard.
+		 * Let kcompactd rest in peace.
+		 */
+		flags = flags & ~__GFP_KSWAPD_RECLAIM;
+#endif
+	}
+	skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
+	if (qdf_likely(is_initial_mem_debug_disabled)) {
+		if (qdf_likely(skb))
+			qdf_nbuf_count_inc(skb);
+	} else {
+		if (qdf_likely(skb)) {
+			qdf_nbuf_count_inc(skb);
+			qdf_net_buf_debug_add_node(skb, size, func, line);
+			qdf_nbuf_history_add(skb, func, line,
+					     QDF_NBUF_ALLOC);
+		} else {
+			qdf_nbuf_history_add(skb, func, line,
+					     QDF_NBUF_ALLOC_FAILURE);
+		}
+	}
+	return skb;
+}
+
+qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
+#endif
+
 #if defined(FEATURE_TSO)
 
 /**
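
Since the wrapper exists only when QCA_DP_NBUF_FAST_PPEDS is
defined, a caller that must also build without the flag could
guard the allocation as in the hedged sketch below; everything
except the two alloc APIs and the flag name is an assumption.

	/* Hypothetical guarded allocation: use the DS fast path when
	 * the feature is compiled in, otherwise fall back to
	 * qdf_nbuf_alloc, which performs the normal skb reset.
	 */
	#ifdef QCA_DP_NBUF_FAST_PPEDS
		nbuf = qdf_nbuf_alloc_ppe_ds(osdev, size, reserve, align, prio);
	#else
		nbuf = qdf_nbuf_alloc(osdev, size, reserve, align, prio);
	#endif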