qcacmn: Split dp_rx_pdev_attach and dp_rx_pdev_detach

Split dp_rx_pdev_attach into dp_rx_pdev_desc_pool_alloc,
dp_rx_pdev_desc_pool_init, dp_rx_pdev_buffers_alloc and
dp_rx_pdev_detach into dp_rx_pdev_desc_pool_free,
dp_rx_pdev_desc_pool_deinit, dp_rx_pdev_buffers_free APIs

This split is made because dp_pdev_init is introduced
as part of this FR; these APIs will be called from
dp_pdev_init/dp_pdev_deinit or dp_pdev_attach/
dp_pdev_detach accordingly, to maintain symmetry
between the DP init and deinit paths

Change-Id: Ib543ddae90b90f4706004080b1f2b7d0e5cfbfbc
CRs-Fixed: 2663595
This commit is contained in:
phadiman
2020-02-19 18:20:44 +05:30
committed by nshrivas
parent 9798b16f3f
commit b133d310ec
6 changed files with 374 additions and 117 deletions

View File

@@ -4544,7 +4544,8 @@ static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
dp_pktlogmod_exit(pdev); dp_pktlogmod_exit(pdev);
dp_rx_fst_detach(soc, pdev); dp_rx_fst_detach(soc, pdev);
dp_rx_pdev_detach(pdev); dp_rx_pdev_buffers_free(pdev);
dp_rx_pdev_desc_pool_deinit(pdev);
dp_rx_pdev_mon_detach(pdev); dp_rx_pdev_mon_detach(pdev);
dp_neighbour_peers_detach(pdev); dp_neighbour_peers_detach(pdev);
qdf_spinlock_destroy(&pdev->tx_mutex); qdf_spinlock_destroy(&pdev->tx_mutex);
@@ -4652,7 +4653,7 @@ static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
struct dp_soc *soc = pdev->soc; struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool; struct rx_desc_pool *rx_desc_pool;
int mac_id, mac_for_pdev; int mac_id;
int lmac_id; int lmac_id;
if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
@@ -4668,6 +4669,7 @@ static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
dp_mon_link_free(pdev); dp_mon_link_free(pdev);
dp_rx_pdev_detach(pdev);
/* Cleanup per PDEV REO rings if configured */ /* Cleanup per PDEV REO rings if configured */
if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
wlan_minidump_remove( wlan_minidump_remove(
@@ -4691,21 +4693,13 @@ static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
RXDMA_DST, 0); RXDMA_DST, 0);
if (dp_is_soc_reinit(soc)) { if (dp_is_soc_reinit(soc)) {
mac_for_pdev = rx_desc_pool = &soc->rx_desc_status[lmac_id];
dp_get_lmac_id_for_pdev_id(soc, mac_id,
pdev->pdev_id);
rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
dp_rx_desc_pool_free(soc, rx_desc_pool); dp_rx_desc_pool_free(soc, rx_desc_pool);
rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev]; rx_desc_pool = &soc->rx_desc_mon[lmac_id];
dp_rx_desc_pool_free(soc, rx_desc_pool); dp_rx_desc_pool_free(soc, rx_desc_pool);
} }
} }
if (dp_is_soc_reinit(soc)) {
rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
dp_rx_desc_pool_free(soc, rx_desc_pool);
}
/* only do soc common cleanup when last pdev do detach */ /* only do soc common cleanup when last pdev do detach */
if (!(soc->pdev_count)) if (!(soc->pdev_count))
dp_soc_cmn_cleanup(soc); dp_soc_cmn_cleanup(soc);

View File

@@ -2468,21 +2468,7 @@ QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
void void
dp_rx_pdev_detach(struct dp_pdev *pdev) dp_rx_pdev_detach(struct dp_pdev *pdev)
{ {
uint8_t mac_for_pdev = pdev->lmac_id; dp_rx_pdev_desc_pool_free(pdev);
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
if (rx_desc_pool->pool_size != 0) {
if (!dp_is_soc_reinit(soc))
dp_rx_desc_nbuf_and_pool_free(soc, mac_for_pdev,
rx_desc_pool);
else
dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
return;
} }
static QDF_STATUS static QDF_STATUS
@@ -2655,6 +2641,127 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_weight;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refil %d", mac_for_pdev);
		/* NSS offload replenishes Rx buffers for this pdev, so no
		 * host descriptor pool is needed. Return here: previously
		 * this branch only set status and fell through, allocating
		 * the pool anyway — inconsistent with
		 * dp_rx_pdev_desc_pool_init(), which returns on this check.
		 */
		return status;
	}

	/* Pool is sized as a multiple of the refill ring depth */
	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_weight =
		wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);

	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_weight * rxdma_entries,
				       rx_desc_pool);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* For Rx buffers, WBM release ring is SW RING 3 for all pdevs */
	rx_desc_pool->owner = DP_WBM2SW_RBM;
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;

	return status;
}
/*
 * dp_rx_pdev_desc_pool_free() - free the memory backing the software rx
 *				 descriptor pool of this pdev
 *
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Rx buffer descriptor pools are indexed by lmac id */
	dp_rx_desc_pool_free(soc, &soc->rx_desc_buf[pdev->lmac_id]);
}
/*
 * dp_rx_pdev_desc_pool_init() - initialize the previously allocated software
 *				 rx descriptor pool of this pdev
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	int lmac_id = pdev->lmac_id;
	struct rx_desc_pool *pool;
	struct dp_srng *refill_srng;
	uint32_t num_entries;
	uint32_t sw_desc_weight;

	/* NSS offload owns Rx replenish for this pdev; nothing to init */
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "nss-wifi<4> skip Rx refil %d", lmac_id);
		return QDF_STATUS_SUCCESS;
	}

	pool = &soc->rx_desc_buf[lmac_id];
	/* Pool memory must already exist (dp_rx_pdev_desc_pool_alloc) */
	if (dp_rx_desc_pool_is_allocated(pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	refill_srng = &soc->rx_refill_buf_ring[lmac_id];
	num_entries = refill_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	sw_desc_weight =
		wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
	dp_rx_desc_pool_init(soc, lmac_id,
			     sw_desc_weight * num_entries, pool);

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * Resets the freelist of rx descriptors and destroys the locks associated
 * with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Pool index is the lmac id serviced by this pdev */
	dp_rx_desc_pool_deinit(soc, &soc->rx_desc_buf[pdev->lmac_id]);
}
/** /**
* dp_rx_attach() - attach DP RX * dp_rx_attach() - attach DP RX
* @pdev: core txrx pdev context * @pdev: core txrx pdev context
@@ -2669,55 +2776,79 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
QDF_STATUS QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev) dp_rx_pdev_attach(struct dp_pdev *pdev)
{ {
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc; struct dp_soc *soc = pdev->soc;
uint32_t rxdma_entries;
uint32_t rx_sw_desc_weight;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
QDF_STATUS ret_val; QDF_STATUS ret_val;
int mac_for_pdev;
if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"nss-wifi<4> skip Rx refil %d", pdev_id); "nss-wifi<4> skip Rx refil %d",
pdev->pdev_id);
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }
pdev = soc->pdev_list[pdev_id]; if (!dp_is_soc_reinit(soc)) {
mac_for_pdev = pdev->lmac_id; ret_val = dp_rx_pdev_desc_pool_alloc(pdev);
dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; if (ret_val != QDF_STATUS_SUCCESS)
return ret_val;
}
rxdma_entries = dp_rxdma_srng->num_entries; dp_rx_pdev_desc_pool_init(pdev);
soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
dp_rx_desc_pool_alloc(soc, mac_for_pdev,
rx_sw_desc_weight * rxdma_entries,
rx_desc_pool);
rx_desc_pool->owner = DP_WBM2SW_RBM;
rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
/* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
ret_val = dp_rx_fst_attach(soc, pdev); ret_val = dp_rx_fst_attach(soc, pdev);
if ((ret_val != QDF_STATUS_SUCCESS) && if ((ret_val != QDF_STATUS_SUCCESS) &&
(ret_val != QDF_STATUS_E_NOSUPPORT)) { (ret_val != QDF_STATUS_E_NOSUPPORT)) {
QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR, QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
"RX Flow Search Table attach failed: pdev %d err %d", "RX Flow Search Table attach failed: pdev %d err %d",
pdev_id, ret_val); pdev->pdev_id, ret_val);
return ret_val; return ret_val;
} }
return dp_rx_pdev_buffers_alloc(pdev);
}
/*
* dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
*
* @pdev: core txrx pdev context
*
* Return: QDF_STATUS - QDF_STATUS_SUCCESS
* QDF_STATUS_E_NOMEM
*/
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
int mac_for_pdev = pdev->lmac_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
uint32_t rxdma_entries;
dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
rxdma_entries = dp_rxdma_srng->num_entries;
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng, return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
rx_desc_pool, rxdma_entries - 1); rx_desc_pool, rxdma_entries - 1);
} }
/*
 * dp_rx_pdev_buffers_free() - free the nbufs (skbs) held by the rx
 *			       descriptor pool of this pdev
 *
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Only the buffers are released; the pool itself stays allocated */
	dp_rx_desc_nbuf_free(soc, &soc->rx_desc_buf[pdev->lmac_id]);
}
/* /*
* dp_rx_nbuf_prepare() - prepare RX nbuf * dp_rx_nbuf_prepare() - prepare RX nbuf
* @soc: core txrx main context * @soc: core txrx main context

View File

@@ -431,6 +431,11 @@ struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie); return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
} }
#else #else
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size,
struct rx_desc_pool *rx_desc_pool);
/** /**
* dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
* the Rx descriptor on Rx DMA source ring buffer * the Rx descriptor on Rx DMA source ring buffer
@@ -494,6 +499,15 @@ void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
} }
#endif /* RX_DESC_MULTI_PAGE_ALLOC */ #endif /* RX_DESC_MULTI_PAGE_ALLOC */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
uint32_t pool_size,
struct rx_desc_pool *rx_desc_pool);
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size,
struct rx_desc_pool *rx_desc_pool);
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
union dp_rx_desc_list_elem_t **local_desc_list, union dp_rx_desc_list_elem_t **local_desc_list,
union dp_rx_desc_list_elem_t **tail, union dp_rx_desc_list_elem_t **tail,
@@ -507,7 +521,17 @@ uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
union dp_rx_desc_list_elem_t **tail); union dp_rx_desc_list_elem_t **tail);
QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
void dp_rx_pdev_detach(struct dp_pdev *pdev); void dp_rx_pdev_detach(struct dp_pdev *pdev);
@@ -570,19 +594,6 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
*/ */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf); qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf);
/*
* dp_rx_desc_pool_alloc() - create a pool of software rx_descs
* at the time of dp rx initialization
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @pool_size: number of Rx descriptor in the pool
* @rx_desc_pool: rx descriptor pool pointer
*
* Return: QDF status
*/
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *pool);
/* /*
* dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during

View File

@@ -25,29 +25,54 @@ A_COMPILE_TIME_ASSERT(cookie_size_check,
PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <= PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
1 << DP_RX_DESC_PAGE_ID_SHIFT); 1 << DP_RX_DESC_PAGE_ID_SHIFT);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, /*
* dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
* rx descriptor pool
*
* @rx_desc_pool: rx descriptor pool pointer
* Return: QDF_STATUS QDF_STATUS_SUCCESS
* QDF_STATUS_E_NOMEM
*/
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	/* Multi-page pool is considered allocated iff it owns >= 1 page */
	if (rx_desc_pool->desc_pages.num_pages)
		return QDF_STATUS_SUCCESS;

	dp_err("Multi page alloc fail, size=%d, elem=%d",
	       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
	return QDF_STATUS_E_NOMEM;
}
/*
* dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
* descriptors
*
* @soc: core txrx main context
* @num_elem: number of rx descriptors (size of the pool)
* @rx_desc_pool: rx descriptor pool pointer
*
* Return: QDF_STATUS QDF_STATUS_SUCCESS
* QDF_STATUS_E_NOMEM
* QDF_STATUS_E_FAULT
*/
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
uint32_t num_elem, uint32_t num_elem,
struct rx_desc_pool *rx_desc_pool) struct rx_desc_pool *rx_desc_pool)
{ {
uint32_t id, page_id, offset, desc_size, num_desc_per_page; uint32_t desc_size;
uint32_t count = 0;
union dp_rx_desc_list_elem_t *rx_desc_elem; union dp_rx_desc_list_elem_t *rx_desc_elem;
desc_size = sizeof(*rx_desc_elem); desc_size = sizeof(*rx_desc_elem);
rx_desc_pool->elem_size = desc_size; rx_desc_pool->elem_size = desc_size;
if (!dp_is_soc_reinit(soc)) {
qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages, qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
desc_size, num_elem, 0, true); desc_size, num_elem, 0, true);
if (!rx_desc_pool->desc_pages.num_pages) { if (!rx_desc_pool->desc_pages.num_pages) {
qdf_err("Multi page alloc fail,size=%d, elem=%d", qdf_err("Multi page alloc fail,size=%d, elem=%d",
desc_size, num_elem); desc_size, num_elem);
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
}
} }
num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
*rx_desc_pool->desc_pages.cacheable_pages;
if (qdf_mem_multi_page_link(soc->osdev, if (qdf_mem_multi_page_link(soc->osdev,
&rx_desc_pool->desc_pages, &rx_desc_pool->desc_pages,
desc_size, num_elem, true)) { desc_size, num_elem, true)) {
@@ -55,10 +80,41 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
desc_size, num_elem); desc_size, num_elem);
goto free_rx_desc_pool; goto free_rx_desc_pool;
} }
return QDF_STATUS_SUCCESS;
free_rx_desc_pool:
dp_rx_desc_pool_free(soc, rx_desc_pool);
return QDF_STATUS_E_FAULT;
}
/*
* dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
* convert the pool of memory into a list of
* rx descriptors and create locks to access this
* list of rx descriptors.
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @pool_size: size of the rx descriptor pool
* @rx_desc_pool: rx descriptor pool pointer
*/
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
uint32_t id, page_id, offset, num_desc_per_page;
uint32_t count = 0;
union dp_rx_desc_list_elem_t *rx_desc_elem;
/* Initialize the lock */ /* Initialize the lock */
qdf_spinlock_create(&rx_desc_pool->lock); qdf_spinlock_create(&rx_desc_pool->lock);
qdf_spin_lock_bh(&rx_desc_pool->lock); qdf_spin_lock_bh(&rx_desc_pool->lock);
rx_desc_pool->pool_size = num_elem; rx_desc_pool->pool_size = pool_size;
num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
*rx_desc_pool->desc_pages.cacheable_pages;
rx_desc_elem = rx_desc_pool->freelist; rx_desc_elem = rx_desc_pool->freelist;
while (rx_desc_elem) { while (rx_desc_elem) {
@@ -82,12 +138,6 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
count++; count++;
} }
qdf_spin_unlock_bh(&rx_desc_pool->lock); qdf_spin_unlock_bh(&rx_desc_pool->lock);
return QDF_STATUS_SUCCESS;
free_rx_desc_pool:
dp_rx_desc_pool_free(soc, rx_desc_pool);
return QDF_STATUS_E_FAULT;
} }
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset, union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
@@ -142,6 +192,7 @@ void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool); qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
if (QDF_IS_STATUS_SUCCESS(qdf_status)) if (QDF_IS_STATUS_SUCCESS(qdf_status))
dp_rx_desc_pool_free(soc, rx_desc_pool); dp_rx_desc_pool_free(soc, rx_desc_pool);
qdf_spin_unlock_bh(&rx_desc_pool->lock); qdf_spin_unlock_bh(&rx_desc_pool->lock);
qdf_spinlock_destroy(&rx_desc_pool->lock); qdf_spinlock_destroy(&rx_desc_pool->lock);
@@ -153,8 +204,6 @@ void dp_rx_desc_nbuf_free(struct dp_soc *soc,
qdf_spin_lock_bh(&rx_desc_pool->lock); qdf_spin_lock_bh(&rx_desc_pool->lock);
__dp_rx_desc_nbuf_free(soc, rx_desc_pool); __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
qdf_spin_unlock_bh(&rx_desc_pool->lock); qdf_spin_unlock_bh(&rx_desc_pool->lock);
qdf_spinlock_destroy(&rx_desc_pool->lock);
} }
void dp_rx_desc_pool_free(struct dp_soc *soc, void dp_rx_desc_pool_free(struct dp_soc *soc,
@@ -165,25 +214,78 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
qdf_mem_multi_pages_free(soc->osdev, qdf_mem_multi_pages_free(soc->osdev,
&rx_desc_pool->desc_pages, 0, true); &rx_desc_pool->desc_pages, 0, true);
} }
#else
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id, void dp_rx_desc_pool_deinit(struct dp_soc *soc,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool) struct rx_desc_pool *rx_desc_pool)
{ {
uint32_t i; qdf_spin_lock_bh(&rx_desc_pool->lock);
if (!dp_is_soc_reinit(soc)) { rx_desc_pool->freelist = NULL;
rx_desc_pool->array = rx_desc_pool->pool_size = 0;
qdf_mem_malloc(pool_size * qdf_spin_unlock_bh(&rx_desc_pool->lock);
sizeof(union dp_rx_desc_list_elem_t)); qdf_spinlock_destroy(&rx_desc_pool->lock);
}
if (!(rx_desc_pool->array)) { #else
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL, /*
"%s: RX Desc Pool[%d] allocation failed", * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
__func__, pool_id); * rx descriptor pool
return QDF_STATUS_E_NOMEM; *
} * @rx_desc_pool: rx descriptor pool pointer
*
* Return: QDF_STATUS QDF_STATUS_SUCCESS
* QDF_STATUS_E_NOMEM
*/
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
if (!rx_desc_pool->array) {
dp_err("nss-wifi<4> skip Rx refil");
return QDF_STATUS_E_NOMEM;
} }
return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		/* Bug fix: the "%s" conversion had no matching argument
		 * (undefined behavior); pass __func__ to satisfy it.
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s: RX Desc Pool allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
/*
* dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
* convert the pool of memory into a list of
* rx descriptors and create locks to access this
* list of rx descriptors.
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @pool_size: size of the rx descriptor pool
* @rx_desc_pool: rx descriptor pool pointer
*/
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
int i;
/* Initialize the lock */ /* Initialize the lock */
qdf_spinlock_create(&rx_desc_pool->lock); qdf_spinlock_create(&rx_desc_pool->lock);
@@ -192,6 +294,7 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
/* link SW rx descs into a freelist */ /* link SW rx descs into a freelist */
rx_desc_pool->freelist = &rx_desc_pool->array[0]; rx_desc_pool->freelist = &rx_desc_pool->array[0];
qdf_mem_zero(rx_desc_pool->freelist, rx_desc_pool->pool_size);
for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) { for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
if (i == rx_desc_pool->pool_size - 1) if (i == rx_desc_pool->pool_size - 1)
rx_desc_pool->array[i].next = NULL; rx_desc_pool->array[i].next = NULL;
@@ -204,7 +307,6 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
} }
qdf_spin_unlock_bh(&rx_desc_pool->lock); qdf_spin_unlock_bh(&rx_desc_pool->lock);
return QDF_STATUS_SUCCESS;
} }
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
@@ -251,12 +353,10 @@ void dp_rx_desc_nbuf_free(struct dp_soc *soc,
qdf_nbuf_unmap_single(soc->osdev, nbuf, qdf_nbuf_unmap_single(soc->osdev, nbuf,
QDF_DMA_FROM_DEVICE); QDF_DMA_FROM_DEVICE);
} }
qdf_nbuf_free(nbuf); qdf_nbuf_free(nbuf);
} }
} }
qdf_spin_unlock_bh(&rx_desc_pool->lock); qdf_spin_unlock_bh(&rx_desc_pool->lock);
qdf_spinlock_destroy(&rx_desc_pool->lock);
} }
void dp_rx_desc_pool_free(struct dp_soc *soc, void dp_rx_desc_pool_free(struct dp_soc *soc,
@@ -264,6 +364,18 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
{ {
qdf_mem_free(rx_desc_pool->array); qdf_mem_free(rx_desc_pool->array);
} }
/*
 * dp_rx_desc_pool_deinit() - de-initialize an rx descriptor pool
 *
 * @soc: core txrx main context (unused here; kept for API symmetry with the
 *	 multi-page variant)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Resets the freelist head and pool size under the pool lock, then destroys
 * the lock. Does NOT free the backing array — that is
 * dp_rx_desc_pool_free()'s job.
 */
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	/* Unlock before destroy: the lock must not be held when destroyed */
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */ #endif /* RX_DESC_MULTI_PAGE_ALLOC */
/* /*
* dp_rx_get_free_desc_list() - provide a list of descriptors from * dp_rx_get_free_desc_list() - provide a list of descriptors from

View File

@@ -1151,12 +1151,14 @@ dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) {
dp_debug("Mon RX Desc Pool[%d] entries=%u", dp_debug("Mon RX Desc Pool[%d] entries=%u",
pdev_id, num_entries); pdev_id, num_entries);
rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx) * num_entries; rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx) * num_entries;
status = dp_rx_desc_pool_alloc(soc, mac_id, rx_desc_pool_size,
rx_desc_pool); if (!dp_is_soc_reinit(soc)) {
if (!QDF_IS_STATUS_SUCCESS(status)) status = dp_rx_desc_pool_alloc(soc, rx_desc_pool_size,
return status; rx_desc_pool);
if (!QDF_IS_STATUS_SUCCESS(status))
return status;
}
rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM; rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
@@ -1164,6 +1166,9 @@ dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) {
replenish_size = ((num_entries - 1) < MON_BUF_MIN_ALLOC_ENTRIES) ? replenish_size = ((num_entries - 1) < MON_BUF_MIN_ALLOC_ENTRIES) ?
(num_entries - 1) : MON_BUF_MIN_ALLOC_ENTRIES; (num_entries - 1) : MON_BUF_MIN_ALLOC_ENTRIES;
dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring, status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
rx_desc_pool, replenish_size); rx_desc_pool, replenish_size);

View File

@@ -2078,10 +2078,14 @@ dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
dp_info("Mon RX Status Pool[%d] entries=%d", dp_info("Mon RX Status Pool[%d] entries=%d",
ring_id, num_entries); ring_id, num_entries);
status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1, if (!dp_is_soc_reinit(soc)) {
rx_desc_pool); status = dp_rx_desc_pool_alloc(soc, num_entries + 1,
if (!QDF_IS_STATUS_SUCCESS(status)) rx_desc_pool);
return status; if (!QDF_IS_STATUS_SUCCESS(status))
return status;
}
dp_rx_desc_pool_init(soc, ring_id, num_entries + 1, rx_desc_pool);
rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;