qcacmn: support multiple pages prealloc for descriptor

Support multiple pages prealloc for DP descriptor

Change-Id: I66d4cef3acf69acf4b6fc8e5a6d01c3d67921dca
CRs-Fixed: 2751338
This commit is contained in:
Jinwei Chen
2020-08-17 01:20:57 -07:00
committed by snandini
parent 21d6fd0ac6
commit 18989f8ede
9 changed files with 225 additions and 49 deletions

View File

@@ -1131,6 +1131,13 @@ struct ol_if_ops {
void (*dp_prealloc_put_consistent)(qdf_size_t size, void (*dp_prealloc_put_consistent)(qdf_size_t size,
void *vaddr_unligned, void *vaddr_unligned,
qdf_dma_addr_t paddr); qdf_dma_addr_t paddr);
void (*dp_get_multi_pages)(uint32_t desc_type,
size_t element_size,
uint16_t element_num,
struct qdf_mem_multi_page_t *pages,
bool cacheable);
void (*dp_put_multi_pages)(uint32_t desc_type,
struct qdf_mem_multi_page_t *pages);
#endif #endif
int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle); int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);

View File

@@ -2516,4 +2516,76 @@ QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
ol_osif_peer_handle osif_peer); ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */ #endif /* QCA_SUPPORT_WDS_EXTENDED */
#ifdef DP_MEM_PRE_ALLOC
/**
* dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
* @soc: datapath soc handle
* @desc_type: memory request source type
* @pages: multi page information storage
* @element_size: each element size
* @element_num: total number of elements should be allocated
* @memctxt: memory context
* @cacheable: coherent memory or cacheable memory
*
* This function is a wrapper for memory allocation over multiple
* pages. If a dp prealloc method is registered, prealloc is tried
* first; if prealloc fails, it falls back to the regular path via
* qdf_mem_multi_pages_alloc().
*
* Return: None
*/
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
enum dp_desc_type desc_type,
struct qdf_mem_multi_page_t *pages,
size_t element_size,
uint16_t element_num,
qdf_dma_context_t memctxt,
bool cacheable);
/**
* dp_desc_multi_pages_mem_free() - free multiple pages memory
* @soc: datapath soc handle
* @desc_type: memory request source type
* @pages: multi page information storage
* @memctxt: memory context
* @cacheable: coherent memory or cacheable memory
*
* This function is a wrapper for freeing multi-page memory. If the
* memory came from the prealloc pool, it is returned to the pool;
* otherwise it is freed via qdf_mem_multi_pages_free().
*
* Return: None
*/
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
enum dp_desc_type desc_type,
struct qdf_mem_multi_page_t *pages,
qdf_dma_context_t memctxt,
bool cacheable);
#else
/* DP_MEM_PRE_ALLOC disabled: no prealloc pool exists, so this stub
 * forwards directly to the regular qdf multi-page allocator.
 * desc_type is accepted for API symmetry but intentionally unused here.
 */
static inline
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
enum dp_desc_type desc_type,
struct qdf_mem_multi_page_t *pages,
size_t element_size,
uint16_t element_num,
qdf_dma_context_t memctxt,
bool cacheable)
{
qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
element_num, memctxt, cacheable);
}
/* DP_MEM_PRE_ALLOC disabled: no prealloc pool exists, so this stub
 * forwards directly to the regular qdf multi-page free routine.
 * desc_type is accepted for API symmetry but intentionally unused here.
 */
static inline
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
enum dp_desc_type desc_type,
struct qdf_mem_multi_page_t *pages,
qdf_dma_context_t memctxt,
bool cacheable)
{
qdf_mem_multi_pages_free(soc->osdev, pages,
memctxt, cacheable);
}
#endif
#endif /* #ifndef _DP_INTERNAL_H_ */ #endif /* #ifndef _DP_INTERNAL_H_ */

View File

@@ -1495,6 +1495,59 @@ static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
} }
} }
/* dp_desc_multi_pages_mem_alloc() - multi-page alloc with prealloc-first
 * policy: try the registered dp_get_multi_pages prealloc callback, and fall
 * back to qdf_mem_multi_pages_alloc() when the callback is absent or fails.
 * See the kernel-doc on the declaration for parameter details.
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
enum dp_desc_type desc_type,
struct qdf_mem_multi_page_t *pages,
size_t element_size,
uint16_t element_num,
qdf_dma_context_t memctxt,
bool cacheable)
{
/* No prealloc callback registered: go straight to dynamic alloc. */
if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
dp_warn("dp_get_multi_pages is null!");
goto qdf;
}
/* Reset bookkeeping so stale values in a reused 'pages' struct cannot
 * be mistaken for a successful prealloc below.
 */
pages->num_pages = 0;
pages->is_mem_prealloc = 0;
soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
element_size,
element_num,
pages,
cacheable);
/* num_pages != 0 means the prealloc pool satisfied the request. */
if (pages->num_pages)
goto end;
qdf:
/* Prealloc unavailable or failed: regular dynamic allocation. */
qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
element_num, memctxt, cacheable);
end:
/* Note which path (pre-alloc vs dynamic) served this request;
 * is_mem_prealloc is only set by a successful prealloc callback.
 */
dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
desc_type, (int)element_size, element_num, cacheable);
}
/* dp_desc_multi_pages_mem_free() - release pages obtained via
 * dp_desc_multi_pages_mem_alloc(): return prealloc'd pages to the pool
 * through dp_put_multi_pages, otherwise free them dynamically.
 * See the kernel-doc on the declaration for parameter details.
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
enum dp_desc_type desc_type,
struct qdf_mem_multi_page_t *pages,
qdf_dma_context_t memctxt,
bool cacheable)
{
if (pages->is_mem_prealloc) {
/* Prealloc'd memory must go back through the matching put
 * callback; a missing callback here is a registration bug.
 */
if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
dp_warn("dp_put_multi_pages is null!");
QDF_BUG(0);
return;
}
soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
/* Clear the descriptor so a repeated free sees num_pages == 0
 * / is_mem_prealloc == 0 instead of dangling page pointers.
 */
qdf_mem_zero(pages, sizeof(*pages));
} else {
qdf_mem_multi_pages_free(soc->osdev, pages,
memctxt, cacheable);
}
}
#else #else
static inline static inline
@@ -2490,7 +2543,8 @@ void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
if (pages->dma_pages) { if (pages->dma_pages) {
wlan_minidump_remove((void *) wlan_minidump_remove((void *)
pages->dma_pages->page_v_addr_start); pages->dma_pages->page_v_addr_start);
qdf_mem_multi_pages_free(soc->osdev, pages, 0, false); dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
pages, 0, false);
} }
} }
@@ -2578,11 +2632,11 @@ QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
FL("total_mem_size: %d"), total_mem_size); FL("total_mem_size: %d"), total_mem_size);
dp_set_max_page_size(pages, max_alloc_size); dp_set_max_page_size(pages, max_alloc_size);
qdf_mem_multi_pages_alloc(soc->osdev, dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
pages, pages,
link_desc_size, link_desc_size,
*total_link_descs, *total_link_descs,
0, false); 0, false);
if (!pages->num_pages) { if (!pages->num_pages) {
dp_err("Multi page alloc fail for hw link desc pool"); dp_err("Multi page alloc fail for hw link desc pool");
return QDF_STATUS_E_FAULT; return QDF_STATUS_E_FAULT;

View File

@@ -3159,6 +3159,7 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx); rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
status = dp_rx_desc_pool_alloc(soc, status = dp_rx_desc_pool_alloc(soc,
rx_sw_desc_num, rx_sw_desc_num,
rx_desc_pool); rx_desc_pool);

View File

@@ -65,8 +65,9 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
desc_size = sizeof(*rx_desc_elem); desc_size = sizeof(*rx_desc_elem);
rx_desc_pool->elem_size = desc_size; rx_desc_pool->elem_size = desc_size;
qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages, dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
desc_size, num_elem, 0, true); &rx_desc_pool->desc_pages,
desc_size, num_elem, 0, true);
if (!rx_desc_pool->desc_pages.num_pages) { if (!rx_desc_pool->desc_pages.num_pages) {
qdf_err("Multi page alloc fail,size=%d, elem=%d", qdf_err("Multi page alloc fail,size=%d, elem=%d",
desc_size, num_elem); desc_size, num_elem);
@@ -218,8 +219,9 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
{ {
if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
return; return;
qdf_mem_multi_pages_free(soc->osdev,
&rx_desc_pool->desc_pages, 0, true); dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
&rx_desc_pool->desc_pages, 0, true);
} }
void dp_rx_desc_pool_deinit(struct dp_soc *soc, void dp_rx_desc_pool_deinit(struct dp_soc *soc,

View File

@@ -2152,6 +2152,7 @@ dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries); dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE;
return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool); return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
} }

View File

@@ -98,10 +98,10 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s)); desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
tx_desc_pool = &((soc)->tx_desc[(pool_id)]); tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
qdf_mem_multi_pages_alloc(soc->osdev, dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
&tx_desc_pool->desc_pages, &tx_desc_pool->desc_pages,
desc_size, num_elem, desc_size, num_elem,
0, true); 0, true);
if (!tx_desc_pool->desc_pages.num_pages) { if (!tx_desc_pool->desc_pages.num_pages) {
dp_err("Multi page alloc fail, tx desc"); dp_err("Multi page alloc fail, tx desc");
@@ -123,8 +123,9 @@ void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
tx_desc_pool = &((soc)->tx_desc[pool_id]); tx_desc_pool = &((soc)->tx_desc[pool_id]);
if (tx_desc_pool->desc_pages.num_pages) if (tx_desc_pool->desc_pages.num_pages)
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
&tx_desc_pool->desc_pages, 0, true); &tx_desc_pool->desc_pages, 0,
true);
} }
/** /**
@@ -221,7 +222,8 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
for (pool_id = 0; pool_id < num_pool; pool_id++) { for (pool_id = 0; pool_id < num_pool; pool_id++) {
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx); memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
qdf_mem_multi_pages_alloc(soc->osdev, dp_desc_multi_pages_mem_alloc(
soc, DP_TX_EXT_DESC_TYPE,
&dp_tx_ext_desc_pool->desc_pages, &dp_tx_ext_desc_pool->desc_pages,
elem_size, elem_size,
num_elem, num_elem,
@@ -243,7 +245,9 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
*/ */
for (pool_id = 0; pool_id < num_pool; pool_id++) { for (pool_id = 0; pool_id < num_pool; pool_id++) {
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
qdf_mem_multi_pages_alloc(soc->osdev, dp_desc_multi_pages_mem_alloc(
soc,
DP_TX_EXT_DESC_LINK_TYPE,
&dp_tx_ext_desc_pool->desc_link_pages, &dp_tx_ext_desc_pool->desc_link_pages,
link_elem_size, link_elem_size,
num_elem, num_elem,
@@ -261,9 +265,10 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
free_ext_desc_page: free_ext_desc_page:
for (count = 0; count < pool_id; pool_id++) { for (count = 0; count < pool_id; pool_id++) {
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(
&dp_tx_ext_desc_pool->desc_link_pages, soc, DP_TX_EXT_DESC_LINK_TYPE,
0, true); &dp_tx_ext_desc_pool->desc_link_pages,
0, true);
} }
pool_id = num_pool; pool_id = num_pool;
@@ -271,9 +276,10 @@ fail_exit:
for (count = 0; count < pool_id; pool_id++) { for (count = 0; count < pool_id; pool_id++) {
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx); memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(
&dp_tx_ext_desc_pool->desc_pages, soc, DP_TX_EXT_DESC_TYPE,
memctx, false); &dp_tx_ext_desc_pool->desc_pages,
memctx, false);
} }
return status; return status;
} }
@@ -381,13 +387,15 @@ void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx); memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(
&dp_tx_ext_desc_pool->desc_link_pages, soc, DP_TX_EXT_DESC_LINK_TYPE,
0, true); &dp_tx_ext_desc_pool->desc_link_pages,
0, true);
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(
&dp_tx_ext_desc_pool->desc_pages, soc, DP_TX_EXT_DESC_TYPE,
memctx, false); &dp_tx_ext_desc_pool->desc_pages,
memctx, false);
} }
} }
@@ -428,10 +436,12 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
for (pool_id = 0; pool_id < num_pool; pool_id++) { for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_desc_pool = &soc->tx_tso_desc[pool_id]; tso_desc_pool = &soc->tx_tso_desc[pool_id];
tso_desc_pool->num_free = 0; tso_desc_pool->num_free = 0;
qdf_mem_multi_pages_alloc(soc->osdev, dp_desc_multi_pages_mem_alloc(
&tso_desc_pool->desc_pages, soc,
desc_size, DP_TX_TSO_DESC_TYPE,
num_elem, 0, true); &tso_desc_pool->desc_pages,
desc_size,
num_elem, 0, true);
if (!tso_desc_pool->desc_pages.num_pages) { if (!tso_desc_pool->desc_pages.num_pages) {
dp_err("Multi page alloc fail, tx desc"); dp_err("Multi page alloc fail, tx desc");
@@ -443,9 +453,9 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
fail: fail:
for (i = 0; i < pool_id; i++) { for (i = 0; i < pool_id; i++) {
tso_desc_pool = &soc->tx_tso_desc[i]; tso_desc_pool = &soc->tx_tso_desc[i];
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
&tso_desc_pool->desc_pages, &tso_desc_pool->desc_pages,
0, true); 0, true);
} }
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
@@ -463,8 +473,9 @@ void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
for (pool_id = 0; pool_id < num_pool; pool_id++) { for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_desc_pool = &soc->tx_tso_desc[pool_id]; tso_desc_pool = &soc->tx_tso_desc[pool_id];
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
&tso_desc_pool->desc_pages, 0, true); &tso_desc_pool->desc_pages,
0, true);
} }
} }
@@ -554,10 +565,10 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
for (pool_id = 0; pool_id < num_pool; pool_id++) { for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
tso_num_seg_pool->num_free = 0; tso_num_seg_pool->num_free = 0;
qdf_mem_multi_pages_alloc(soc->osdev, dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
&tso_num_seg_pool->desc_pages, &tso_num_seg_pool->desc_pages,
desc_size, desc_size,
num_elem, 0, true); num_elem, 0, true);
if (!tso_num_seg_pool->desc_pages.num_pages) { if (!tso_num_seg_pool->desc_pages.num_pages) {
dp_err("Multi page alloc fail, tso_num_seg_pool"); dp_err("Multi page alloc fail, tso_num_seg_pool");
@@ -569,9 +580,9 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
fail: fail:
for (i = 0; i < pool_id; i++) { for (i = 0; i < pool_id; i++) {
tso_num_seg_pool = &soc->tx_tso_num_seg[i]; tso_num_seg_pool = &soc->tx_tso_num_seg[i];
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
&tso_num_seg_pool->desc_pages, &tso_num_seg_pool->desc_pages,
0, true); 0, true);
} }
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
@@ -590,9 +601,9 @@ void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
for (pool_id = 0; pool_id < num_pool; pool_id++) { for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
qdf_mem_multi_pages_free(soc->osdev, dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
&tso_num_seg_pool->desc_pages, &tso_num_seg_pool->desc_pages,
0, true); 0, true);
} }
} }

View File

@@ -366,6 +366,28 @@ struct dp_rx_nbuf_frag_info {
} virt_addr; } virt_addr;
}; };
/**
* enum dp_desc_type - source type for multiple pages allocation
* @DP_TX_DESC_TYPE: DP SW TX descriptor
* @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
* @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
* @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
* @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
* @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
* @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
* @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
*
* These values cross the ol_ops boundary as a plain uint32_t
* (dp_get_multi_pages/dp_put_multi_pages), so the numeric values
* are part of the contract with the prealloc provider — do not
* reorder or insert entries in the middle of this enum.
*/
enum dp_desc_type {
DP_TX_DESC_TYPE,
DP_TX_EXT_DESC_TYPE,
DP_TX_EXT_DESC_LINK_TYPE,
DP_TX_TSO_DESC_TYPE,
DP_TX_TSO_NUM_SEG_TYPE,
DP_RX_DESC_BUF_TYPE,
DP_RX_DESC_STATUS_TYPE,
DP_HW_LINK_DESC_TYPE,
};
/** /**
* struct rx_desc_pool * struct rx_desc_pool
* @pool_size: number of RX descriptor in the pool * @pool_size: number of RX descriptor in the pool
@@ -378,6 +400,7 @@ struct dp_rx_nbuf_frag_info {
* @buf_size: Buffer size * @buf_size: Buffer size
* @buf_alignment: Buffer alignment * @buf_alignment: Buffer alignment
* @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
* @desc_type: type of desc this pool serves
*/ */
struct rx_desc_pool { struct rx_desc_pool {
uint32_t pool_size; uint32_t pool_size;
@@ -393,6 +416,7 @@ struct rx_desc_pool {
uint16_t buf_size; uint16_t buf_size;
uint8_t buf_alignment; uint8_t buf_alignment;
bool rx_mon_dest_frag_enable; bool rx_mon_dest_frag_enable;
enum dp_desc_type desc_type;
}; };
/** /**

View File

@@ -59,6 +59,7 @@ struct qdf_mem_dma_page_t {
* @num_pages: Number of allocation needed pages * @num_pages: Number of allocation needed pages
* @dma_pages: page information storage in case of coherent memory * @dma_pages: page information storage in case of coherent memory
* @cacheable_pages: page information storage in case of cacheable memory * @cacheable_pages: page information storage in case of cacheable memory
* @is_mem_prealloc: flag for multiple pages pre-alloc or not
*/ */
struct qdf_mem_multi_page_t { struct qdf_mem_multi_page_t {
uint16_t num_element_per_page; uint16_t num_element_per_page;
@@ -66,6 +67,9 @@ struct qdf_mem_multi_page_t {
struct qdf_mem_dma_page_t *dma_pages; struct qdf_mem_dma_page_t *dma_pages;
void **cacheable_pages; void **cacheable_pages;
qdf_size_t page_size; qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
uint8_t is_mem_prealloc;
#endif
}; };