qcacmn: Add a set of nbuf fragment allocation APIs

Add a set of explicit frag-way (page-fragment based) nbuf allocation
APIs, and switch IPA Tx nbuf allocation to the frag-way API, which
saves memory.

The existing __qdf_nbuf_alloc defaults to non-frag allocation when the
SKB recycler is not defined.

Change-Id: I5f87ffac54c49f9af920775c13b6dfdd147476dd
CRs-Fixed: 3534452
Author:     Sijun Wu
Date:       2023-03-31 14:53:55 +05:30
Committed:  Rahul Choudhary
Parent:     e6b159369e
Commit:     27f8cfff08

5 changed files with 164 additions and 8 deletions
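
For illustration only (not part of the change): a hypothetical call site
showing that the frag-way API takes the same arguments as qdf_nbuf_alloc(),
so callers such as the IPA Tx pool attach paths below switch by renaming the
call. The helper name, "osdev" and "alloc_size" are placeholders.

/* Hypothetical caller sketch; arguments are (osdev, size, reserve, align,
 * prio), matching qdf_nbuf_alloc().
 */
static qdf_nbuf_t alloc_one_ipa_tx_buf(qdf_device_t osdev,
				       qdf_size_t alloc_size)
{
	/* frag-way allocation: data buffer comes from a page fragment cache */
	qdf_nbuf_t nbuf = qdf_nbuf_frag_alloc(osdev, alloc_size, 0, 256, FALSE);

	if (!nbuf)
		return NULL;	/* out of memory; caller handles replenish */

	return nbuf;
}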

==== File 1 of 5 ====

@@ -644,7 +644,8 @@ static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
-		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
+		nbuf = qdf_nbuf_frag_alloc(soc->osdev, alloc_size, 0,
+					   256, FALSE);

		if (!nbuf)
			break;
@@ -1494,7 +1495,8 @@ static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
-		nbuf = qdf_nbuf_alloc(soc->osdev, alloc_size, 0, 256, FALSE);
+		nbuf = qdf_nbuf_frag_alloc(soc->osdev, alloc_size, 0,
+					   256, FALSE);

		if (!nbuf)
			break;

View File

@@ -2286,6 +2286,13 @@ qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
				int reserve, int align, int prio,
				const char *func, uint32_t line);

+#define qdf_nbuf_frag_alloc(d, s, r, a, p) \
+	qdf_nbuf_frag_alloc_debug(d, s, r, a, p, __func__, __LINE__)
+
+qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
+				     int reserve, int align, int prio,
+				     const char *func, uint32_t line);
+
 /**
  * qdf_nbuf_alloc_no_recycler() - Allocates skb
  * @size: Size to be allocated for skb
@@ -2461,6 +2468,18 @@ qdf_nbuf_alloc_fl(qdf_device_t osdev, qdf_size_t size, int reserve, int align,
	return __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
 }

+#define qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio) \
+	qdf_nbuf_frag_alloc_fl(osdev, size, reserve, align, prio, \
+			       __func__, __LINE__)
+
+static inline qdf_nbuf_t
+qdf_nbuf_frag_alloc_fl(qdf_device_t osdev, qdf_size_t size, int reserve,
+		       int align, int prio, const char *func, uint32_t line)
+{
+	return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
+				     func, line);
+}
+
 /**
  * qdf_nbuf_alloc_no_recycler_fl() - Allocate SKB
  * @size: Size to be allocated for skb

==== File 2 of 5 ====

@@ -49,6 +49,8 @@
  */
 #define GFP_KERNEL 0
 #define GFP_ATOMIC 0
+#define __GFP_KSWAPD_RECLAIM 0
+#define __GFP_DIRECT_RECLAIM 0
 #define kzalloc(size, flags) NULL
 #define vmalloc(size) NULL
 #define kfree(buf)

==== File 3 of 5 ====

@@ -310,7 +310,7 @@ typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
  * @func: Function name of the call site
  * @line: line number of the call site
  *
- * This allocates an nbuf aligns if needed and reserves some space in the front,
+ * This allocates a nbuf aligns if needed and reserves some space in the front,
  * since the reserve is done after alignment the reserve value if being
  * unaligned will result in an unaligned address.
  *
@@ -345,6 +345,28 @@ __qdf_nbuf_t __qdf_nbuf_alloc_ppe_ds(__qdf_device_t osdev, size_t size,
			     const char *func, uint32_t line);
 #endif /* QCA_DP_NBUF_FAST_PPEDS */

+/**
+ * __qdf_nbuf_frag_alloc() - Allocate nbuf in page fragment way.
+ * @osdev: Device handle
+ * @size: Netbuf requested size
+ * @reserve: headroom to start with
+ * @align: Align
+ * @prio: Priority
+ * @func: Function name of the call site
+ * @line: line number of the call site
+ *
+ * This allocates a nbuf aligns if needed and reserves some space in the front,
+ * since the reserve is done after alignment the reserve value if being
+ * unaligned will result in an unaligned address.
+ * It will call into kernel page fragment APIs, long time keeping for scattered
+ * allocations should be considered for avoidance.
+ *
+ * Return: nbuf or %NULL if no memory
+ */
+__qdf_nbuf_t
+__qdf_nbuf_frag_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
+		      int prio, const char *func, uint32_t line);
+
 /**
  * __qdf_nbuf_alloc_no_recycler() - Allocates skb
  * @size: Size to be allocated for skb
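
Background note (generic kernel behaviour, not part of this change): "page
fragment way" means the skb data buffer is carved out of a shared per-CPU
page fragment cache rather than allocated as its own slab object, which is
why long-lived buffers should be avoided - they keep whole backing pages
pinned. A minimal sketch of the two paths, using standard kernel APIs:

/* Sketch only; assumes <linux/skbuff.h> is available. */
static struct sk_buff *frag_backed_skb(unsigned int len)
{
	/*
	 * For sizes that fit in a page, __netdev_alloc_skb() typically takes
	 * the head from a per-CPU page fragment cache (dev may be NULL).
	 */
	return __netdev_alloc_skb(NULL, len, GFP_ATOMIC);
}

static struct sk_buff *slab_backed_skb(unsigned int len)
{
	/* alloc_skb() gets a dedicated kmalloc'd head instead */
	return alloc_skb(len, GFP_ATOMIC);
}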

==== File 4 of 5 ====

@@ -555,6 +555,16 @@ skb_alloc:
 }
 #else

+#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
+struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
+				 int align, int prio, const char *func,
+				 uint32_t line)
+{
+	return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio, func,
+				     line);
+}
+#else
+
 struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
@@ -579,6 +589,81 @@ struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
 #endif
	}

+	skb = alloc_skb(size, flags);
+	if (skb)
+		goto skb_alloc;
+
+	skb = pld_nbuf_pre_alloc(size);
+
+	if (!skb) {
+		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
+				size, func, line);
+		__qdf_nbuf_start_replenish_timer();
+		return NULL;
+	}
+
+	__qdf_nbuf_stop_replenish_timer();
+
+skb_alloc:
+	memset(skb->cb, 0x0, sizeof(skb->cb));
+	skb->dev = NULL;
+
+	/*
+	 * The default is for netbuf fragments to be interpreted
+	 * as wordstreams rather than bytestreams.
+	 */
+	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
+	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
+
+	/*
+	 * XXX:how about we reserve first then align
+	 * Align & make sure that the tail & data are adjusted properly
+	 */
+	if (align) {
+		offset = ((unsigned long)skb->data) % align;
+		if (offset)
+			skb_reserve(skb, align - offset);
+	}
+
+	/*
+	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
+	 * pointer
+	 */
+	skb_reserve(skb, reserve);
+	qdf_nbuf_count_inc(skb);
+
+	return skb;
+}
+#endif
+#endif
+
+qdf_export_symbol(__qdf_nbuf_alloc);
+
+struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
+				      int reserve, int align, int prio,
+				      const char *func, uint32_t line)
+{
+	struct sk_buff *skb;
+	unsigned long offset;
+	int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
+
+	if (align)
+		size += (align - 1);
+
+	if (in_interrupt() || irqs_disabled() || in_atomic()) {
+		flags = GFP_ATOMIC;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+		/*
+		 * Observed that kcompactd burns out CPU to make order-3 page.
+		 *__netdev_alloc_skb has 4k page fallback option just in case of
+		 * failing high order page allocation so we don't need to be
+		 * hard. Make kcompactd rest in piece.
+		 */
+		flags = flags & ~__GFP_KSWAPD_RECLAIM;
+#endif
+	}
+
	skb = __netdev_alloc_skb(NULL, size, flags);
	if (skb)
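
Editorial aside (hypothetical helper, same logic as the added code above):
the fragment allocator never sleeps in direct reclaim, and in atomic context
it also avoids waking kswapd/kcompactd for high-order pages, relying on
__netdev_alloc_skb()'s order-0 fallback instead.

/* Condensed sketch of the GFP selection; helper name is illustrative. */
static gfp_t frag_alloc_gfp(void)
{
	/* Process context: kswapd reclaim allowed, direct (sleeping) reclaim not */
	gfp_t flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		/*
		 * Atomic context: GFP_ATOMIC never sleeps; also skip waking
		 * kswapd/kcompactd for high-order pages (kernels >= 4.4),
		 * since __netdev_alloc_skb() falls back to order-0 pages.
		 */
		flags = GFP_ATOMIC & ~__GFP_KSWAPD_RECLAIM;
	}

	return flags;
}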
@@ -591,13 +676,12 @@ struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
-	} else {
-		__qdf_nbuf_stop_replenish_timer();
	}

+	__qdf_nbuf_stop_replenish_timer();
+
 skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));
-
	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
@@ -625,8 +709,8 @@ skb_alloc:
	return skb;
 }
-#endif
-qdf_export_symbol(__qdf_nbuf_alloc);
+
+qdf_export_symbol(__qdf_nbuf_frag_alloc);

 __qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line)
@@ -3377,6 +3461,33 @@ qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
 }
 qdf_export_symbol(qdf_nbuf_alloc_debug);

+qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
+				     int reserve, int align, int prio,
+				     const char *func, uint32_t line)
+{
+	qdf_nbuf_t nbuf;
+
+	if (is_initial_mem_debug_disabled)
+		return __qdf_nbuf_frag_alloc(osdev, size,
+					     reserve, align,
+					     prio, func, line);
+
+	nbuf = __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
+				     func, line);
+
+	/* Store SKB in internal QDF tracking table */
+	if (qdf_likely(nbuf)) {
+		qdf_net_buf_debug_add_node(nbuf, size, func, line);
+		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
+	} else {
+		qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
+	}
+
+	return nbuf;
+}
+qdf_export_symbol(qdf_nbuf_frag_alloc_debug);
+
 qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
					    const char *func, uint32_t line)
 {