qcacld-3.0: Add ref_cnt implementation for flow pool
Add ref_cnt implementation for flow pool to synchronize usage
and deletion of flow pool.

Change-Id: Idb52c74df6a771b776e3df6bf4aea5c150b5fc7a
CRs-Fixed: 2025757
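The race being fixed: a context that has looked a flow pool up can still be dereferencing it while another context frees it via ol_tx_delete_flow_pool(). Below is a minimal standalone C analog of the ref_cnt scheme, a sketch only: names are hypothetical and C11 stdatomic stands in for qdf_atomic_t.

#include <stdatomic.h>
#include <stdlib.h>

struct pool {
	atomic_int ref_cnt;
	/* descriptors, locks, ... */
};

/* the creator holds the initial reference */
static struct pool *pool_create(void)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (p)
		atomic_init(&p->ref_cnt, 1);
	return p;
}

/* every user pins the pool before touching it */
static void pool_get(struct pool *p)
{
	atomic_fetch_add(&p->ref_cnt, 1);
}

/* the pool is freed only when the last reference drops,
 * so deletion can no longer race with a live user */
static void pool_put(struct pool *p)
{
	if (atomic_fetch_sub(&p->ref_cnt, 1) == 1)
		free(p);
}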
@@ -531,7 +531,33 @@ void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
 			uint8_t flow_pool_id);
 struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 						 uint16_t flow_pool_size);
-int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force);
+
+/**
+ * ol_tx_inc_pool_ref() - increment pool ref count
+ * @pool: flow pool pointer
+ *
+ * Increments pool's ref count, used to make sure that no one is using
+ * pool when it is being deleted.
+ * As this function is taking pool->flow_pool_lock inside it, it should
+ * always be called outside this spinlock.
+ *
+ * Return: QDF_STATUS_SUCCESS - in case of success
+ */
+QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool);
+
+/**
+ * ol_tx_dec_pool_ref() - decrement pool ref count
+ * @pool: flow pool pointer
+ * @force: free pool forcefully
+ *
+ * Decrements pool's ref count and deletes the pool if ref count gets 0.
+ * As this function is taking pdev->tx_desc.flow_pool_list_lock and
+ * pool->flow_pool_lock inside it, it should always be called outside
+ * these two spinlocks.
+ *
+ * Return: QDF_STATUS_SUCCESS - in case of success
+ */
+QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force);
 #else
 
 static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
@@ -565,10 +591,15 @@ static inline struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(
 {
 	return NULL;
 }
-static inline int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool,
-					 bool force)
+static inline QDF_STATUS
+ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool)
 {
-	return 0;
+	return QDF_STATUS_SUCCESS;
+}
+static inline QDF_STATUS
+ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
+{
+	return QDF_STATUS_SUCCESS;
 }
 #endif
 
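Per the kernel-doc contracts above, both helpers take pool->flow_pool_lock internally, so neither may be called with it held. A hypothetical call site honoring that rule might look like this (sketch only, not code from this change):

static void use_pool(struct ol_tx_flow_pool_t *pool)
{
	/* pin the pool before use, outside pool->flow_pool_lock */
	if (ol_tx_inc_pool_ref(pool) != QDF_STATUS_SUCCESS)
		return;

	/* ... use the pool, taking flow_pool_lock only as needed ... */

	/* unpin; this may delete the pool, so the pointer must not
	 * be dereferenced afterwards */
	ol_tx_dec_pool_ref(pool, false);
}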
@@ -81,7 +81,7 @@ ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
 static void
 ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
 {
-	ol_tx_delete_flow_pool(pdev->mgmt_pool, false);
+	ol_tx_dec_pool_ref(pdev->mgmt_pool, false);
 	return;
 }
 #else
@@ -135,13 +135,145 @@ void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
 		ol_txrx_info("flow pool list is not empty %d!!!\n", i++);
 		if (i == 1)
 			ol_tx_dump_flow_pool_info();
-		ol_tx_delete_flow_pool(pool, true);
+		ol_tx_dec_pool_ref(pool, true);
 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	}
 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
 }
 
+/**
+ * ol_tx_delete_flow_pool() - delete flow pool
+ * @pool: flow pool pointer
+ * @force: free pool forcefully
+ *
+ * Delete flow_pool if all tx descriptors are available.
+ * Otherwise put it in FLOW_POOL_INVALID state.
+ * If force is set then pull all available descriptors to
+ * global pool.
+ *
+ * Return: 0 for success or error
+ */
+static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	uint16_t i, size;
+	union ol_tx_desc_list_elem_t *temp_list = NULL;
+	struct ol_tx_desc_t *tx_desc = NULL;
+
+	if (!pool) {
+		ol_txrx_err(
+		   "%s: pool is NULL\n", __func__);
+		QDF_ASSERT(0);
+		return -ENOMEM;
+	}
+	if (!pdev) {
+		ol_txrx_err(
+		   "%s: pdev is NULL\n", __func__);
+		QDF_ASSERT(0);
+		return -ENOMEM;
+	}
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->avail_desc == pool->flow_pool_size || force == true)
+		pool->status = FLOW_POOL_INACTIVE;
+	else
+		pool->status = FLOW_POOL_INVALID;
+
+	/* Take all free descriptors and put it in temp_list */
+	temp_list = pool->freelist;
+	size = pool->avail_desc;
+	pool->freelist = NULL;
+	pool->avail_desc = 0;
+
+	if (pool->status == FLOW_POOL_INACTIVE) {
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		/* Free flow_pool */
+		qdf_spinlock_destroy(&pool->flow_pool_lock);
+		qdf_mem_free(pool);
+	} else { /* FLOW_POOL_INVALID case*/
+		pool->flow_pool_size -= size;
+		pool->flow_pool_id = INVALID_FLOW_ID;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		ol_tx_inc_pool_ref(pool);
+
+		pdev->tx_desc.num_invalid_bin++;
+		ol_txrx_info(
+		   "%s: invalid pool created %d\n",
+		   __func__, pdev->tx_desc.num_invalid_bin);
+		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
+			ASSERT(0);
+
+		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
+				  flow_pool_list_elem);
+		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	}
+
+	/* put free descriptors to global pool */
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	for (i = 0; i < size; i++) {
+		tx_desc = &temp_list->tx_desc;
+		temp_list = temp_list->next;
+
+		ol_tx_put_desc_global_pool(pdev, tx_desc);
+	}
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	return 0;
+}
+
+QDF_STATUS ol_tx_inc_pool_ref(struct ol_tx_flow_pool_t *pool)
+{
+	if (!pool) {
+		ol_txrx_err("flow pool is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	qdf_atomic_inc(&pool->ref_cnt);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+	ol_txrx_dbg("pool %p, ref_cnt %x",
+		    pool, qdf_atomic_read(&pool->ref_cnt));
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pool) {
+		ol_txrx_err("flow pool is NULL");
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (!pdev) {
+		ol_txrx_err("pdev is NULL");
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (qdf_atomic_dec_and_test(&pool->ref_cnt)) {
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool,
+			     flow_pool_list_elem);
+		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		ol_txrx_dbg("Deleting pool %p", pool);
+		ol_tx_delete_flow_pool(pool, force);
+	} else {
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		ol_txrx_dbg("pool %p, ref_cnt %x",
+			    pool, qdf_atomic_read(&pool->ref_cnt));
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
 /**
  * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
  *
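Note the lock order in ol_tx_dec_pool_ref() above: flow_pool_list_lock is taken before flow_pool_lock, so the thread that drops the last reference can unlink the pool from the list before any other thread finds it there. A standalone pthread/stdatomic sketch of that dec-and-test-then-unlink pattern (illustrative analog only, not driver code; the per-pool spinlock is omitted for brevity):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct node {
	atomic_int ref_cnt;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

/* caller holds list_lock */
static void node_unlink(struct node *n)
{
	struct node **pp = &list_head;

	while (*pp && *pp != n)
		pp = &(*pp)->next;
	if (*pp)
		*pp = n->next;
}

static void node_put(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(&n->ref_cnt, 1) == 1) {
		/* last reference: unlink while still holding the list
		 * lock so no new lookup can return this node */
		node_unlink(n);
		pthread_mutex_unlock(&list_lock);
		free(n);
		return;
	}
	pthread_mutex_unlock(&list_lock);
}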
@@ -150,7 +282,7 @@ void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
 void ol_tx_dump_flow_pool_info(void)
 {
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-	struct ol_tx_flow_pool_t *pool = NULL;
+	struct ol_tx_flow_pool_t *pool = NULL, *pool_prev = NULL;
 	struct ol_tx_flow_pool_t tmp_pool;
 
 
@@ -180,10 +312,15 @@ void ol_tx_dump_flow_pool_info(void)
 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
 		      flow_pool_list_elem) {
+		ol_tx_inc_pool_ref(pool);
 		qdf_spin_lock_bh(&pool->flow_pool_lock);
 		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+		if (pool_prev)
+			ol_tx_dec_pool_ref(pool_prev, false);
 
 		ol_txrx_info("\n");
 		ol_txrx_info(
 			"Flow_pool_id %d :: status %d",
@@ -201,10 +338,16 @@ void ol_tx_dump_flow_pool_info(void)
 		ol_txrx_info(
 			"Pkt dropped due to unavailablity of descriptors %d",
 			tmp_pool.pkt_drop_no_desc);
 
+		pool_prev = pool;
 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	}
 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	/* decrement ref count for last pool in list */
+	if (pool_prev)
+		ol_tx_dec_pool_ref(pool_prev, false);
+
 	return;
 }
 
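The traversal above uses a pool_prev trick: the current pool is pinned with ol_tx_inc_pool_ref() so the list lock can be dropped while its stats are printed, and the reference on the element just visited is released only after the iterator has moved past it, since a decrement may unlink and free that pool. Continuing the earlier analog (node, list_head, list_lock and node_put() as defined in the sketch above), the same pattern might look like:

static void node_get(struct node *n)
{
	atomic_fetch_add(&n->ref_cnt, 1);
}

static void dump_all(void)
{
	struct node *n, *prev = NULL;

	pthread_mutex_lock(&list_lock);
	for (n = list_head; n; n = n->next) {
		node_get(n);                    /* pin current element */
		pthread_mutex_unlock(&list_lock);

		if (prev)
			node_put(prev);         /* may unlink and free */

		/* slow work (printing) without the list lock held */

		prev = n;
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);

	if (prev)
		node_put(prev);                 /* release last element */
}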
@@ -373,6 +516,8 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	pool->start_th = (start_threshold * flow_pool_size)/100;
 	pool->stop_th = (stop_threshold * flow_pool_size)/100;
 	qdf_spinlock_create(&pool->flow_pool_lock);
+	qdf_atomic_init(&pool->ref_cnt);
+	ol_tx_inc_pool_ref(pool);
 
 	/* Take TX descriptor from global_pool and put it in temp_list*/
 	qdf_spin_lock_bh(&pdev->tx_mutex);
@@ -404,91 +549,6 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	return pool;
 }
 
-/**
- * ol_tx_delete_flow_pool() - delete flow pool
- * @pool: flow pool pointer
- * @force: free pool forcefully
- *
- * Delete flow_pool if all tx descriptors are available.
- * Otherwise put it in FLOW_POOL_INVALID state.
- * If force is set then pull all available descriptors to
- * global pool.
- *
- * Return: 0 for success or error
- */
-int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
-{
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-	uint16_t i, size;
-	union ol_tx_desc_list_elem_t *temp_list = NULL;
-	struct ol_tx_desc_t *tx_desc = NULL;
-
-	if (!pool) {
-		ol_txrx_err(
-		   "%s: pool is NULL\n", __func__);
-		QDF_ASSERT(0);
-		return -ENOMEM;
-	}
-	if (!pdev) {
-		ol_txrx_err(
-		   "%s: pdev is NULL\n", __func__);
-		QDF_ASSERT(0);
-		return -ENOMEM;
-	}
-
-	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
-	TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
-	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
-
-	qdf_spin_lock_bh(&pool->flow_pool_lock);
-	if (pool->avail_desc == pool->flow_pool_size || force == true)
-		pool->status = FLOW_POOL_INACTIVE;
-	else
-		pool->status = FLOW_POOL_INVALID;
-
-	/* Take all free descriptors and put it in temp_list */
-	temp_list = pool->freelist;
-	size = pool->avail_desc;
-	pool->freelist = NULL;
-	pool->avail_desc = 0;
-
-	if (pool->status == FLOW_POOL_INACTIVE) {
-		qdf_spin_unlock_bh(&pool->flow_pool_lock);
-		/* Free flow_pool */
-		qdf_spinlock_destroy(&pool->flow_pool_lock);
-		qdf_mem_free(pool);
-	} else { /* FLOW_POOL_INVALID case*/
-		pool->flow_pool_size -= size;
-		pool->flow_pool_id = INVALID_FLOW_ID;
-		qdf_spin_unlock_bh(&pool->flow_pool_lock);
-
-		pdev->tx_desc.num_invalid_bin++;
-		ol_txrx_info(
-		   "%s: invalid pool created %d\n",
-		   __func__, pdev->tx_desc.num_invalid_bin);
-		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
-			ASSERT(0);
-
-		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
-		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
-				  flow_pool_list_elem);
-		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
-	}
-
-	/* put free descriptors to global pool */
-	qdf_spin_lock_bh(&pdev->tx_mutex);
-	for (i = 0; i < size; i++) {
-		tx_desc = &temp_list->tx_desc;
-		temp_list = temp_list->next;
-
-		ol_tx_put_desc_global_pool(pdev, tx_desc);
-	}
-	qdf_spin_unlock_bh(&pdev->tx_mutex);
-
-	return 0;
-}
-
 
 /**
  * ol_tx_free_invalid_flow_pool() - free invalid pool
  * @pool: pool
@@ -517,7 +577,7 @@ int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
 		   "%s: invalid pool deleted %d\n",
 		   __func__, pdev->tx_desc.num_invalid_bin);
 
-	return ol_tx_delete_flow_pool(pool, false);
+	return ol_tx_dec_pool_ref(pool, false);
 }
 
 /**
@@ -669,7 +729,7 @@ void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
 		break;
 	default:
 		if (pool_create)
-			ol_tx_delete_flow_pool(pool, false);
+			ol_tx_dec_pool_ref(pool, false);
 		ol_txrx_err(
 			"%s: flow type %d not supported !!!\n",
 			__func__, type);
@@ -728,8 +788,11 @@ void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
 		return;
 	}
 
-	/* only delete if all descriptors are available */
-	ol_tx_delete_flow_pool(pool, false);
+	/*
+	 * only delete if all descriptors are available
+	 * and pool ref count becomes 0
+	 */
+	ol_tx_dec_pool_ref(pool, false);
 
 	return;
 }
@@ -452,6 +452,7 @@ struct ol_txrx_pool_stats {
  * @start_th: start threshold
  * @freelist: tx descriptor freelist
  * @pkt_drop_no_desc: drop due to no descriptors
+ * @ref_cnt: pool's ref count
  */
 struct ol_tx_flow_pool_t {
 	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;

@@ -467,6 +468,7 @@ struct ol_tx_flow_pool_t {
 	uint16_t start_th;
 	union ol_tx_desc_list_elem_t *freelist;
 	uint16_t pkt_drop_no_desc;
+	qdf_atomic_t ref_cnt;
 };
 
 #endif