qcacmn: Allocate multi page memory for dp_rx_desc_pool_alloc

Allocate memory in multiple smaller chunks for dp_rx_desc_pool_alloc
and link the pages, instead of allocating one large contiguous block,
to avoid memory allocation failures for large descriptor pools.
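The idea in miniature: back the pool with many small page-sized
allocations and thread a single freelist through all of them. Below is
a self-contained C sketch of the technique (hypothetical names such as
struct elem and pool_alloc, not the driver's QDF API); it assumes
page_size is at least one element.

#include <stdlib.h>

/* Hypothetical element type standing in for union dp_rx_desc_list_elem_t. */
struct elem {
	struct elem *next;
	char payload[56];
};

struct multi_page_pool {
	void **pages;		/* one pointer per page-sized allocation */
	int num_pages;
	int elems_per_page;
	struct elem *freelist;
};

/*
 * Allocate the pool as many small page allocations instead of one large
 * contiguous block, then chain every element into one freelist, crossing
 * page boundaries as needed. Returns 0 on success, -1 on failure.
 */
static int pool_alloc(struct multi_page_pool *p, size_t page_size, int num_elem)
{
	int i, j, n;
	struct elem *e, **tail = &p->freelist;

	p->elems_per_page = page_size / sizeof(struct elem);
	p->num_pages = (num_elem + p->elems_per_page - 1) / p->elems_per_page;
	p->pages = calloc(p->num_pages, sizeof(void *));
	if (!p->pages)
		return -1;

	for (i = 0; i < p->num_pages; i++) {
		p->pages[i] = malloc(page_size); /* small, likely to succeed */
		if (!p->pages[i])
			return -1; /* caller frees the partial pool */
		n = (i == p->num_pages - 1) ?
			num_elem - i * p->elems_per_page : p->elems_per_page;
		for (j = 0; j < n; j++) {
			e = (struct elem *)p->pages[i] + j;
			*tail = e;	/* link across page boundaries */
			tail = &e->next;
		}
	}
	*tail = NULL;

	return 0;
}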

Change-Id: Id81de10727555c4ca78963a6f01ed3b992ce9924
CRs-Fixed: 2443999
Author: Varun Reddy Yeturu
Date: 2019-05-16 14:03:46 -07:00
Committed by: nshrivas
Parent: 1514e796b6
Commit: a7c21dc7f3

9 changed files with 366 additions and 65 deletions

dp/wifi3.0/dp_ipa.c

@@ -80,6 +80,46 @@ QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
}
#ifdef RX_DESC_MULTI_PAGE_ALLOC
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							  struct dp_pdev *pdev,
							  bool create)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return QDF_STATUS_SUCCESS;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	qdf_spin_lock_bh(&rx_pool->lock);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;
		__dp_ipa_handle_buf_smmu_mapping(soc, nbuf, create);
	}
	qdf_spin_unlock_bh(&rx_pool->lock);

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							  struct dp_pdev *pdev,
							  bool create)
@@ -109,6 +149,7 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
	return QDF_STATUS_SUCCESS;
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
/**
* dp_tx_ipa_uc_detach - Free autonomy TX resources

dp/wifi3.0/dp_main.c

@@ -3783,15 +3783,15 @@ static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
			mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
							      pdev->pdev_id);
			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
-			dp_rx_desc_free_array(soc, rx_desc_pool);
+			dp_rx_desc_pool_free(soc, rx_desc_pool);
			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
-			dp_rx_desc_free_array(soc, rx_desc_pool);
+			dp_rx_desc_pool_free(soc, rx_desc_pool);
		}
	}

	if (dp_is_soc_reinit(soc)) {
		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
-		dp_rx_desc_free_array(soc, rx_desc_pool);
+		dp_rx_desc_pool_free(soc, rx_desc_pool);
	}

	soc->pdev_list[pdev->pdev_id] = NULL;

dp/wifi3.0/dp_rx.c

@@ -2191,9 +2191,10 @@ dp_rx_pdev_detach(struct dp_pdev *pdev)
	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
-			dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
+			dp_rx_desc_nbuf_and_pool_free(soc, pdev_id,
+						      rx_desc_pool);
		else
-			dp_rx_desc_nbuf_pool_free(soc, rx_desc_pool);
+			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return;

dp/wifi3.0/dp_rx.h

@@ -102,6 +102,30 @@ struct dp_rx_desc {
		unmapped:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4
#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)

#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)

#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
#define RX_DESC_COOKIE_INDEX_SHIFT 0
#define RX_DESC_COOKIE_INDEX_MASK 0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT 18
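As a worked check of the multi-page cookie layout defined above (4-bit
pool ID, 8-bit page ID, 8-bit offset; the hardware sw_buffer_cookie
field is 21 bits wide and this encoding uses the low 20), here is a
standalone harness. The macros are copied from this header; main() is
purely illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copied from the header above. */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4
#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)

int main(void)
{
	/* Encode pool 2, page 5, offset 7 as dp_rx_desc_pool_alloc does. */
	uint32_t cookie = (2u << DP_RX_DESC_POOL_ID_SHIFT) |
			  (5u << DP_RX_DESC_PAGE_ID_SHIFT) | 7u;

	/* Decode with the same logic as the GET macros. */
	assert(((cookie & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>
		DP_RX_DESC_POOL_ID_SHIFT) == 2);
	assert(((cookie & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>
		DP_RX_DESC_PAGE_ID_SHIFT) == 5);
	assert((cookie & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK) == 7);

	printf("cookie = 0x%05x\n", cookie); /* prints 0x20507 */
	return 0;
}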
@@ -276,6 +300,84 @@ union dp_rx_desc_list_elem_t {
struct dp_rx_desc rx_desc;
};
#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool pointer
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);
static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
* dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
* the Rx descriptor on Rx DMA source ring buffer
@@ -337,6 +439,7 @@ void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
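For orientation, a hedged sketch of how a caller uses these helpers
(hypothetical excerpt; rx_cookie_to_nbuf is not part of this change): a
21-bit sw_buffer_cookie read from a REO destination ring entry maps
straight back to its software descriptor and network buffer.

static inline qdf_nbuf_t rx_cookie_to_nbuf(struct dp_soc *soc, uint32_t cookie)
{
	struct dp_rx_desc *rx_desc;

	/* Pool ID, page ID and offset are all carried in the cookie. */
	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
	if (qdf_unlikely(!rx_desc))	/* out-of-range pool ID */
		return NULL;

	return rx_desc->nbuf;
}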
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
@@ -378,19 +481,56 @@ dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
*/
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);
-QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
-				 uint32_t pool_id,
-				 uint32_t pool_size,
+/*
+ * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
+ *			     at the time of dp rx initialization
+ *
+ * @soc: core txrx main context
+ * @pool_id: pool_id which is one of 3 mac_ids
+ * @pool_size: number of Rx descriptors in the pool
+ * @rx_desc_pool: rx descriptor pool pointer
+ *
+ * Return: QDF status
+ */
+QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
+				 uint32_t pool_size, struct rx_desc_pool *pool);
/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
-			  uint32_t pool_id,
			  struct rx_desc_pool *rx_desc_pool);
-void dp_rx_desc_nbuf_pool_free(struct dp_soc *soc,
-			       struct rx_desc_pool *rx_desc_pool);
-void dp_rx_desc_free_array(struct dp_soc *soc,
-			   struct rx_desc_pool *rx_desc_pool);
void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,

dp/wifi3.0/dp_rx_desc.c

@@ -20,17 +20,152 @@
#include "dp_rx.h"
#include "dp_ipa.h"
#ifdef RX_DESC_MULTI_PAGE_ALLOC
/*
 * The cookie's page-relative offset field is DP_RX_DESC_OFFSET_NUM_BITS
 * (= DP_RX_DESC_PAGE_ID_SHIFT) wide, so a page must never hold more
 * elements than that field can index.
 */
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);
-/*
- * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
- *			     at the time of dp rx initialization
- *
- * @soc: core txrx main context
- * @pool_id: pool_id which is one of 3 mac_ids
- * @pool_size: number of Rx descriptor in the pool
- * @rx_desc_pool: rx descriptor pool pointer
- *
- * return success or failure
- */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, desc_size, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
					  desc_size, num_elem, 0, true);
		if (!rx_desc_pool->desc_pages.num_pages) {
			qdf_err("Multi page alloc fail,size=%d, elem=%d",
				desc_size, num_elem);
			return QDF_STATUS_E_NOMEM;
		}
	}

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				 *rx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link,size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = num_elem;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}

	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page =
		rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_BIDIRECTIONAL);
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}
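Taken together, the new functions pair into an alloc/free lifecycle. A
hedged sketch of a caller (my_pdev_attach and my_pdev_detach are
hypothetical names; the detach branching mirrors the pdev detach hunks
elsewhere in this commit):

/* Hypothetical caller showing the intended pairing of the new API. */
static QDF_STATUS my_pdev_attach(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t num_elem)
{
	struct rx_desc_pool *pool = &soc->rx_desc_buf[pool_id];

	/* Allocates page-backed descriptor memory and builds the freelist. */
	return dp_rx_desc_pool_alloc(soc, pool_id, num_elem, pool);
}

static void my_pdev_detach(struct dp_soc *soc, uint32_t pool_id)
{
	struct rx_desc_pool *pool = &soc->rx_desc_buf[pool_id];

	if (!dp_is_soc_reinit(soc))
		/* Free in-use nbufs, then the descriptor pages themselves. */
		dp_rx_desc_nbuf_and_pool_free(soc, pool_id, pool);
	else
		/* On SoC reinit, keep the pages; free only the nbufs. */
		dp_rx_desc_nbuf_free(soc, pool);
}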
#else
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
@@ -71,15 +206,7 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
	return QDF_STATUS_SUCCESS;
}
-/*
- * dp_rx_desc_pool_free() - free the sw rx desc pool called during
- *			    de-initialization of wifi module.
- *
- * @soc: core txrx main context
- * @pool_id: pool_id which is one of 3 mac_ids
- * @rx_desc_pool: rx descriptor pool pointer
- */
-void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id,
+void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
@@ -105,15 +232,7 @@ void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id,
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

-/*
- * dp_rx_desc_pool_free_nbuf() - free the sw rx desc nbufs called during
- *				 de-initialization of wifi module.
- *
- * @soc: core txrx main context
- * @pool_id: pool_id which is one of 3 mac_ids
- * @rx_desc_pool: rx descriptor pool pointer
- */
-void dp_rx_desc_nbuf_pool_free(struct dp_soc *soc,
+void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
@@ -139,20 +258,12 @@ void dp_rx_desc_nbuf_pool_free(struct dp_soc *soc,
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

-/*
- * dp_rx_desc_pool_free_array() - free the sw rx desc array called during
- *				  de-initialization of wifi module.
- *
- * @soc: core txrx main context
- * @pool_id: pool_id which is one of 3 mac_ids
- * @rx_desc_pool: rx descriptor pool pointer
- */
-void dp_rx_desc_free_array(struct dp_soc *soc,
+void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
/*
* dp_rx_get_free_desc_list() - provide a list of descriptors from
* the free rx desc pool.

dp/wifi3.0/dp_rx_mon_dest.c

@@ -1192,9 +1192,10 @@ dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id)
	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
-			dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool);
+			dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
+						      rx_desc_pool);
		else
-			dp_rx_desc_nbuf_pool_free(soc, rx_desc_pool);
+			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;

dp/wifi3.0/dp_rx_mon_status.c

@@ -888,9 +888,10 @@ dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
	rx_desc_pool = &soc->rx_desc_status[mac_id];

	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
-			dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool);
+			dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
+						      rx_desc_pool);
		else
-			dp_rx_desc_nbuf_pool_free(soc, rx_desc_pool);
+			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;

dp/wifi3.0/dp_tx_desc.c

@@ -94,7 +94,7 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
			     uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
-	uint16_t num_page, num_desc_per_page;
+	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
@@ -113,7 +113,6 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	}

-	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)

dp/wifi3.0/dp_types.h

@@ -268,6 +268,8 @@ enum dp_cpu_ring_map_types {
/**
 * struct rx_desc_pool
 * @pool_size: number of RX descriptors in the pool
 * @elem_size: Element size
 * @desc_pages: Multi page descriptors
 * @array: pointer to array of RX descriptors
 * @freelist: pointer to free RX descriptor link list
 * @lock: Protection for the RX descriptor pool
@@ -275,7 +277,12 @@ enum dp_cpu_ring_map_types {
 */
struct rx_desc_pool {
	uint32_t pool_size;
#ifdef RX_DESC_MULTI_PAGE_ALLOC
	uint16_t elem_size;
	struct qdf_mem_multi_page_t desc_pages;
#else
	union dp_rx_desc_list_elem_t *array;
#endif
	union dp_rx_desc_list_elem_t *freelist;
	qdf_spinlock_t lock;
	uint8_t owner;
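To make the addressing concrete: element i of a multi-page pool lives in
page i / num_element_per_page, at byte offset elem_size * (i %
num_element_per_page) within that page, which is exactly what
dp_rx_desc_find computes. A standalone sketch with hypothetical stand-in
types (mirroring, not reusing, qdf_mem_multi_page_t):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for qdf_mem_multi_page_t / struct rx_desc_pool. */
struct pages {
	void **cacheable_pages;		/* one pointer per allocated page */
	uint16_t num_element_per_page;
};

struct pool {
	uint16_t elem_size;
	struct pages desc_pages;
};

/* Mirrors dp_rx_desc_find(): page index plus byte offset within the page. */
static void *elem_at(struct pool *p, uint32_t i)
{
	uint32_t page_id = i / p->desc_pages.num_element_per_page;
	uint32_t offset = i % p->desc_pages.num_element_per_page;

	return (char *)p->desc_pages.cacheable_pages[page_id] +
	       (size_t)p->elem_size * offset;
}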