qcacmn: support multiple pages prealloc for descriptor
Support pre-allocation of multi-page memory for DP descriptor pools.
Add dp_desc_multi_pages_mem_alloc()/dp_desc_multi_pages_mem_free()
wrappers that try the registered prealloc callbacks first and fall
back to qdf_mem_multi_pages_alloc()/qdf_mem_multi_pages_free() when
no pre-allocated pages are available.

Change-Id: I66d4cef3acf69acf4b6fc8e5a6d01c3d67921dca
CRs-Fixed: 2751338
@@ -1131,6 +1131,13 @@ struct ol_if_ops {
        void (*dp_prealloc_put_consistent)(qdf_size_t size,
                                           void *vaddr_unligned,
                                           qdf_dma_addr_t paddr);
        void (*dp_get_multi_pages)(uint32_t desc_type,
                                   size_t element_size,
                                   uint16_t element_num,
                                   struct qdf_mem_multi_page_t *pages,
                                   bool cacheable);
        void (*dp_put_multi_pages)(uint32_t desc_type,
                                   struct qdf_mem_multi_page_t *pages);
#endif
        int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
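The two new ol_if_ops callbacks are the hook through which the platform layer hands pre-reserved descriptor pages to the datapath. A minimal provider-side sketch follows; the pool table, lookup logic, and wlan_dp_* names are illustrative assumptions, not part of this change. The contract the DP layer relies on is only that a successful get callback fills pages->num_pages and sets pages->is_mem_prealloc, and that num_pages stays 0 when nothing suitable is reserved.

/* Illustrative provider sketch (hypothetical names, not from this commit). */
#include "qdf_types.h"
#include "qdf_mem.h"

#define DP_PREALLOC_NUM_POOLS 8

struct dp_prealloc_multi_page_pool {          /* assumed bookkeeping entry */
        uint32_t desc_type;
        bool in_use;
        struct qdf_mem_multi_page_t pages;    /* reserved once at driver probe */
};

static struct dp_prealloc_multi_page_pool
        dp_prealloc_pools[DP_PREALLOC_NUM_POOLS];

static void wlan_dp_get_multi_pages(uint32_t desc_type, size_t element_size,
                                    uint16_t element_num,
                                    struct qdf_mem_multi_page_t *pages,
                                    bool cacheable)
{
        int i;

        /* A real provider would also validate element_size/element_num and
         * the cacheable attribute against what was reserved.
         */
        for (i = 0; i < DP_PREALLOC_NUM_POOLS; i++) {
                struct dp_prealloc_multi_page_pool *p = &dp_prealloc_pools[i];

                if (p->in_use || p->desc_type != desc_type ||
                    !p->pages.num_pages)
                        continue;

                *pages = p->pages;           /* hand the reserved pages over */
                pages->is_mem_prealloc = 1;  /* free path returns them to us */
                p->in_use = true;
                return;
        }
        /* Leaving pages->num_pages at 0 makes the DP wrapper fall back to
         * qdf_mem_multi_pages_alloc().
         */
}

static void wlan_dp_put_multi_pages(uint32_t desc_type,
                                    struct qdf_mem_multi_page_t *pages)
{
        int i;

        for (i = 0; i < DP_PREALLOC_NUM_POOLS; i++) {
                struct dp_prealloc_multi_page_pool *p = &dp_prealloc_pools[i];

                if (p->in_use && p->desc_type == desc_type) {
                        p->in_use = false;   /* keep the pages for reuse */
                        return;
                }
        }
}

Handlers of this shape would then be plugged into the dp_get_multi_pages and dp_put_multi_pages members of ol_if_ops during SOC attach.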
@@ -2516,4 +2516,76 @@ QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
                                  ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#ifdef DP_MEM_PRE_ALLOC
/**
 * dp_desc_multi_pages_mem_alloc() - alloc memory over multiple pages
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @element_size: each element size
 * @element_num: total number of elements to be allocated
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for memory allocation over multiple
 * pages. If a DP prealloc method is registered, prealloc is tried
 * first; if prealloc fails, it falls back to the regular path via
 * qdf_mem_multi_pages_alloc().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
                                   enum dp_desc_type desc_type,
                                   struct qdf_mem_multi_page_t *pages,
                                   size_t element_size,
                                   uint16_t element_num,
                                   qdf_dma_context_t memctxt,
                                   bool cacheable);

/**
 * dp_desc_multi_pages_mem_free() - free multiple pages memory
 * @soc: datapath soc handle
 * @desc_type: memory request source type
 * @pages: multi page information storage
 * @memctxt: memory context
 * @cacheable: coherent memory or cacheable memory
 *
 * This function is a wrapper for multi-page memory free. If the
 * memory came from the prealloc pool, it is put back into the pool;
 * otherwise it is freed via qdf_mem_multi_pages_free().
 *
 * Return: None
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
                                  enum dp_desc_type desc_type,
                                  struct qdf_mem_multi_page_t *pages,
                                  qdf_dma_context_t memctxt,
                                  bool cacheable);

#else
static inline
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
                                   enum dp_desc_type desc_type,
                                   struct qdf_mem_multi_page_t *pages,
                                   size_t element_size,
                                   uint16_t element_num,
                                   qdf_dma_context_t memctxt,
                                   bool cacheable)
{
        qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
                                  element_num, memctxt, cacheable);
}

static inline
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
                                  enum dp_desc_type desc_type,
                                  struct qdf_mem_multi_page_t *pages,
                                  qdf_dma_context_t memctxt,
                                  bool cacheable)
{
        qdf_mem_multi_pages_free(soc->osdev, pages,
                                 memctxt, cacheable);
}
#endif

#endif /* #ifndef _DP_INTERNAL_H_ */
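On the datapath side, callers of the new wrappers do not need to know whether pre-allocation is in effect. A short usage sketch under assumed element sizes and counts (the function name and numbers below are placeholders, not values from this commit):

/* Hypothetical caller; element size/count are placeholders. */
static QDF_STATUS example_desc_pool_alloc(struct dp_soc *soc)
{
        struct qdf_mem_multi_page_t pages = {0};
        size_t elem_size = 256;     /* assumed per-descriptor size */
        uint16_t elem_num = 1024;   /* assumed pool depth */

        /* Tries the registered prealloc callback first; falls back to
         * qdf_mem_multi_pages_alloc() when nothing is reserved.
         */
        dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE, &pages,
                                      elem_size, elem_num, 0, true);
        if (!pages.num_pages)
                return QDF_STATUS_E_NOMEM;

        /* ... carve descriptors out of pages.cacheable_pages ... */

        /* The matching free checks pages.is_mem_prealloc and either returns
         * the pages to the prealloc pool or frees them via
         * qdf_mem_multi_pages_free().
         */
        dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE, &pages, 0, true);

        return QDF_STATUS_SUCCESS;
}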
@@ -1495,6 +1495,59 @@ static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
        }
}

void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
                                   enum dp_desc_type desc_type,
                                   struct qdf_mem_multi_page_t *pages,
                                   size_t element_size,
                                   uint16_t element_num,
                                   qdf_dma_context_t memctxt,
                                   bool cacheable)
{
        if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
                dp_warn("dp_get_multi_pages is null!");
                goto qdf;
        }

        pages->num_pages = 0;
        pages->is_mem_prealloc = 0;
        soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
                                                element_size,
                                                element_num,
                                                pages,
                                                cacheable);
        if (pages->num_pages)
                goto end;

qdf:
        qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
                                  element_num, memctxt, cacheable);
end:
        dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
                pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
                desc_type, (int)element_size, element_num, cacheable);
}

void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
                                  enum dp_desc_type desc_type,
                                  struct qdf_mem_multi_page_t *pages,
                                  qdf_dma_context_t memctxt,
                                  bool cacheable)
{
        if (pages->is_mem_prealloc) {
                if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
                        dp_warn("dp_put_multi_pages is null!");
                        QDF_BUG(0);
                        return;
                }

                soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
                qdf_mem_zero(pages, sizeof(*pages));
        } else {
                qdf_mem_multi_pages_free(soc->osdev, pages,
                                         memctxt, cacheable);
        }
}

#else

static inline
@@ -2490,7 +2543,8 @@ void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
        if (pages->dma_pages) {
                wlan_minidump_remove((void *)
                                     pages->dma_pages->page_v_addr_start);
                qdf_mem_multi_pages_free(soc->osdev, pages, 0, false);
                dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
                                             pages, 0, false);
        }
}

@@ -2578,7 +2632,7 @@ QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
                  FL("total_mem_size: %d"), total_mem_size);

        dp_set_max_page_size(pages, max_alloc_size);
        qdf_mem_multi_pages_alloc(soc->osdev,
        dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
                                      pages,
                                      link_desc_size,
                                      *total_link_descs,
@@ -3159,6 +3159,7 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
        rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

        rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
        status = dp_rx_desc_pool_alloc(soc,
                                       rx_sw_desc_num,
                                       rx_desc_pool);

@@ -65,7 +65,8 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
        desc_size = sizeof(*rx_desc_elem);
        rx_desc_pool->elem_size = desc_size;

        qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
        dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
                                      &rx_desc_pool->desc_pages,
                                      desc_size, num_elem, 0, true);
        if (!rx_desc_pool->desc_pages.num_pages) {
                qdf_err("Multi page alloc fail,size=%d, elem=%d",
@@ -218,7 +219,8 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
{
        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
                return;
        qdf_mem_multi_pages_free(soc->osdev,

        dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
                                     &rx_desc_pool->desc_pages, 0, true);
}

@@ -2152,6 +2152,7 @@ dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)

        dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

        rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE;
        return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}
@@ -98,7 +98,7 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,

        desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
        tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
        qdf_mem_multi_pages_alloc(soc->osdev,
        dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
                                      &tx_desc_pool->desc_pages,
                                      desc_size, num_elem,
                                      0, true);

@@ -123,8 +123,9 @@ void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
        tx_desc_pool = &((soc)->tx_desc[pool_id]);

        if (tx_desc_pool->desc_pages.num_pages)
                qdf_mem_multi_pages_free(soc->osdev,
                                         &tx_desc_pool->desc_pages, 0, true);
                dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
                                             &tx_desc_pool->desc_pages, 0,
                                             true);
}

/**
@@ -221,7 +222,8 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
                qdf_mem_multi_pages_alloc(soc->osdev,
                dp_desc_multi_pages_mem_alloc(
                                soc, DP_TX_EXT_DESC_TYPE,
                                &dp_tx_ext_desc_pool->desc_pages,
                                elem_size,
                                num_elem,

@@ -243,7 +245,9 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
         */
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                qdf_mem_multi_pages_alloc(soc->osdev,
                dp_desc_multi_pages_mem_alloc(
                                soc,
                                DP_TX_EXT_DESC_LINK_TYPE,
                                &dp_tx_ext_desc_pool->desc_link_pages,
                                link_elem_size,
                                num_elem,
@@ -261,7 +265,8 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
free_ext_desc_page:
        for (count = 0; count < pool_id; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                qdf_mem_multi_pages_free(soc->osdev,
                dp_desc_multi_pages_mem_free(
                                soc, DP_TX_EXT_DESC_LINK_TYPE,
                                &dp_tx_ext_desc_pool->desc_link_pages,
                                0, true);
        }

@@ -271,7 +276,8 @@ fail_exit:
        for (count = 0; count < pool_id; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
                qdf_mem_multi_pages_free(soc->osdev,
                dp_desc_multi_pages_mem_free(
                                soc, DP_TX_EXT_DESC_TYPE,
                                &dp_tx_ext_desc_pool->desc_pages,
                                memctx, false);
        }
@@ -381,11 +387,13 @@ void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

        qdf_mem_multi_pages_free(soc->osdev,
        dp_desc_multi_pages_mem_free(
                        soc, DP_TX_EXT_DESC_LINK_TYPE,
                        &dp_tx_ext_desc_pool->desc_link_pages,
                        0, true);

        qdf_mem_multi_pages_free(soc->osdev,
        dp_desc_multi_pages_mem_free(
                        soc, DP_TX_EXT_DESC_TYPE,
                        &dp_tx_ext_desc_pool->desc_pages,
                        memctx, false);
}
@@ -428,7 +436,9 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_desc_pool = &soc->tx_tso_desc[pool_id];
                tso_desc_pool->num_free = 0;
                qdf_mem_multi_pages_alloc(soc->osdev,
                dp_desc_multi_pages_mem_alloc(
                                soc,
                                DP_TX_TSO_DESC_TYPE,
                                &tso_desc_pool->desc_pages,
                                desc_size,
                                num_elem, 0, true);

@@ -443,7 +453,7 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
fail:
        for (i = 0; i < pool_id; i++) {
                tso_desc_pool = &soc->tx_tso_desc[i];
                qdf_mem_multi_pages_free(soc->osdev,
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
                                             &tso_desc_pool->desc_pages,
                                             0, true);
        }
@@ -463,8 +473,9 @@ void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_desc_pool = &soc->tx_tso_desc[pool_id];
                qdf_mem_multi_pages_free(soc->osdev,
                                         &tso_desc_pool->desc_pages, 0, true);
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
                                             &tso_desc_pool->desc_pages,
                                             0, true);
        }
}
@@ -554,7 +565,7 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
                tso_num_seg_pool->num_free = 0;
                qdf_mem_multi_pages_alloc(soc->osdev,
                dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                              &tso_num_seg_pool->desc_pages,
                                              desc_size,
                                              num_elem, 0, true);

@@ -569,7 +580,7 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
fail:
        for (i = 0; i < pool_id; i++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[i];
                qdf_mem_multi_pages_free(soc->osdev,
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                             &tso_num_seg_pool->desc_pages,
                                             0, true);
        }
@@ -590,7 +601,7 @@ void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
                qdf_mem_multi_pages_free(soc->osdev,
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                             &tso_num_seg_pool->desc_pages,
                                             0, true);
        }
@@ -366,6 +366,28 @@ struct dp_rx_nbuf_frag_info {
        } virt_addr;
};

/**
 * enum dp_desc_type - source type for multiple pages allocation
 * @DP_TX_DESC_TYPE: DP SW TX descriptor
 * @DP_TX_EXT_DESC_TYPE: DP TX msdu extension descriptor
 * @DP_TX_EXT_DESC_LINK_TYPE: DP link descriptor for msdu ext_desc
 * @DP_TX_TSO_DESC_TYPE: DP TX TSO descriptor
 * @DP_TX_TSO_NUM_SEG_TYPE: DP TX number of segments
 * @DP_RX_DESC_BUF_TYPE: DP RX SW descriptor
 * @DP_RX_DESC_STATUS_TYPE: DP RX SW descriptor for monitor status
 * @DP_HW_LINK_DESC_TYPE: DP HW link descriptor
 */
enum dp_desc_type {
        DP_TX_DESC_TYPE,
        DP_TX_EXT_DESC_TYPE,
        DP_TX_EXT_DESC_LINK_TYPE,
        DP_TX_TSO_DESC_TYPE,
        DP_TX_TSO_NUM_SEG_TYPE,
        DP_RX_DESC_BUF_TYPE,
        DP_RX_DESC_STATUS_TYPE,
        DP_HW_LINK_DESC_TYPE,
};

/**
 * struct rx_desc_pool
 * @pool_size: number of RX descriptor in the pool
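The desc_type enum is what lets a prealloc provider size its reserves per descriptor source. A hypothetical probe-time sizing table keyed by dp_desc_type could look like the sketch below; the element sizes and counts are placeholders, not values taken from this commit.

/* Hypothetical sizing table; real numbers would come from wlan_cfg. */
struct dp_prealloc_desc_cfg {
        enum dp_desc_type desc_type;
        size_t element_size;
        uint16_t element_num;
};

static const struct dp_prealloc_desc_cfg dp_prealloc_desc_cfg[] = {
        { DP_TX_DESC_TYPE,      128, 4096 },  /* SW TX descriptors */
        { DP_RX_DESC_BUF_TYPE,  128, 4096 },  /* SW RX descriptors */
        { DP_HW_LINK_DESC_TYPE,  64, 8192 },  /* HW link descriptors */
};

A provider could walk such a table at driver probe, reserve the pages with qdf_mem_multi_pages_alloc(), and later serve them through the dp_get_multi_pages callback.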
@@ -378,6 +400,7 @@ struct dp_rx_nbuf_frag_info {
 * @buf_size: Buffer size
 * @buf_alignment: Buffer alignment
 * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
 * @desc_type: type of desc this pool serves
 */
struct rx_desc_pool {
        uint32_t pool_size;

@@ -393,6 +416,7 @@ struct rx_desc_pool
        uint16_t buf_size;
        uint8_t buf_alignment;
        bool rx_mon_dest_frag_enable;
        enum dp_desc_type desc_type;
};

/**
@@ -59,6 +59,7 @@ struct qdf_mem_dma_page_t {
 * @num_pages: Number of allocation needed pages
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @is_mem_prealloc: flag for multiple pages pre-alloc or not
 */
struct qdf_mem_multi_page_t {
        uint16_t num_element_per_page;

@@ -66,6 +67,9 @@ struct qdf_mem_multi_page_t {
        struct qdf_mem_dma_page_t *dma_pages;
        void **cacheable_pages;
        qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
        uint8_t is_mem_prealloc;
#endif
};
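Because is_mem_prealloc only exists when DP_MEM_PRE_ALLOC is defined, any code that reads it has to sit behind the same feature flag, as the DP wrappers above do. A hypothetical helper mirroring that guard pattern:

/* Illustrative helper, not part of this commit. */
#include "qdf_mem.h"

#ifdef DP_MEM_PRE_ALLOC
static inline bool dp_pages_are_prealloced(struct qdf_mem_multi_page_t *pages)
{
        return !!pages->is_mem_prealloc;
}
#else
static inline bool dp_pages_are_prealloced(struct qdf_mem_multi_page_t *pages)
{
        return false;   /* field does not exist without DP_MEM_PRE_ALLOC */
}
#endif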