qcacmn: Avoid per-packet Rx refill buffer pool lock

Avoid taking the Rx refill buffer pool lock per packet; instead,
acquire and release the lock once for each burst of nbuf
replenishment.
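
The pattern, as a standalone sketch (pthread_mutex_t stands in for the
QDF BH spinlock; all names here are illustrative, not driver code):

/*
 * Sketch of the change: replenish n buffers from a shared pool,
 * taking the pool lock once per burst instead of once per buffer.
 */
#include <pthread.h>
#include <stddef.h>

struct buf {
	struct buf *next;
};

struct pool {
	pthread_mutex_t lock;	/* stands in for bufq_lock */
	struct buf *head;
	unsigned int len;
};

/* Pop one buffer; caller must already hold pool->lock. */
static struct buf *pool_dequeue_locked(struct pool *p)
{
	struct buf *b = p->head;

	if (!b)
		return NULL;
	p->head = b->next;
	b->next = NULL;
	p->len--;
	return b;
}

/* Old pattern: one lock/unlock round trip per buffer replenished. */
static unsigned int refill_per_packet(struct pool *p, unsigned int n)
{
	unsigned int count = 0;
	struct buf *b;

	while (count < n) {
		pthread_mutex_lock(&p->lock);
		b = pool_dequeue_locked(p);
		pthread_mutex_unlock(&p->lock);
		if (!b)
			break;
		/* ... post b to the refill ring ... */
		count++;
	}
	return count;
}

/* New pattern: one lock/unlock around the whole burst. */
static unsigned int refill_burst(struct pool *p, unsigned int n)
{
	unsigned int count = 0;
	struct buf *b;

	pthread_mutex_lock(&p->lock);
	while (count < n) {
		b = pool_dequeue_locked(p);
		if (!b)
			break;
		/* ... post b to the refill ring ... */
		count++;
	}
	pthread_mutex_unlock(&p->lock);
	return count;
}

The work per burst is unchanged, but the lock/unlock round trip, with
its implied atomics and barriers, now runs once per burst rather than
once per buffer; the trade-off is a longer single hold time on the
pool lock.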

Change-Id: I778cb9702d31d4b8911cca4e7f06f12c50ef6df9
CRs-Fixed: 2884208
Author: Karthik Kantamneni
Date: 2021-02-23 16:57:44 +05:30
Committed-by: snandini
Parent: 58b2f08534
Commit: c9cec76270

4 changed files with 41 additions and 4 deletions

dp/wifi3.0/dp_rx.c

@@ -303,6 +303,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 		    dp_soc, num_req_buffers);

 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
 						   rxdma_srng,
 						   sync_hw_ptr);
@@ -349,6 +350,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,

 	count = 0;

+	dp_rx_refill_buff_pool_lock(dp_soc);
+
 	while (count < num_req_buffers) {
 		/* Flag is set while pdev rx_desc_pool initialization */
 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
@@ -405,6 +408,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 	}

+	dp_rx_refill_buff_pool_unlock(dp_soc);
+
 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

 	dp_rx_schedule_refill_thread(dp_soc);

dp/wifi3.0/dp_rx_buffer_pool.c

@@ -195,15 +195,13 @@ static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
 	qdf_nbuf_t nbuf = NULL;
 	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;

-	if (!buff_pool->is_initialized || !buff_pool->bufq_len)
+	if (!buff_pool->in_rx_refill_lock || !buff_pool->bufq_len)
 		return nbuf;

-	qdf_spin_lock_bh(&buff_pool->bufq_lock);
 	nbuf = buff_pool->buf_head;
 	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
 	qdf_nbuf_set_next(nbuf, NULL);
 	buff_pool->bufq_len--;
-	qdf_spin_unlock_bh(&buff_pool->bufq_lock);

 	return nbuf;
 }
@@ -219,7 +217,7 @@ dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
 	qdf_nbuf_t nbuf;

 	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
-	if (nbuf) {
+	if (qdf_likely(nbuf)) {
 		DP_STATS_INC(dp_pdev,
 			     rx_refill_buff_pool.num_bufs_allocated, 1);
 		return nbuf;
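
For context, qdf_likely()/qdf_unlikely() are QDF's branch-prediction
hints. On GCC/Clang they conventionally reduce to __builtin_expect,
along these lines (a sketch; the exact QDF definition may differ):

/* Conventional likely/unlikely hint macros (sketch, not the QDF source). */
#define qdf_likely(x)	__builtin_expect(!!(x), 1)
#define qdf_unlikely(x)	__builtin_expect(!!(x), 0)

The dequeue from the refill pool is expected to succeed on the hot
path, so the success branch is laid out as the fall-through case.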

dp/wifi3.0/dp_rx_buffer_pool.h

@@ -109,6 +109,34 @@ static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc)
 		soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(dp_soc_to_cdp_soc_t(soc));
 }

+/**
+ * dp_rx_refill_buff_pool_lock() - Acquire Rx refill buff pool lock
+ * @soc: SoC handle
+ *
+ */
+static inline void dp_rx_refill_buff_pool_lock(struct dp_soc *soc)
+{
+	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
+
+	if (buff_pool->is_initialized &&
+	    qdf_spin_trylock_bh(&buff_pool->bufq_lock))
+		buff_pool->in_rx_refill_lock = true;
+}
+
+/**
+ * dp_rx_refill_buff_pool_unlock() - Release Rx refill buff pool lock
+ * @soc: SoC handle
+ *
+ */
+static inline void dp_rx_refill_buff_pool_unlock(struct dp_soc *soc)
+{
+	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
+
+	if (buff_pool->in_rx_refill_lock) {
+		qdf_spin_unlock_bh(&buff_pool->bufq_lock);
+		buff_pool->in_rx_refill_lock = false;
+	}
+}
+
 #else
 /**
  * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
@@ -206,5 +234,10 @@ dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
 }

 static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }
+
+static inline void dp_rx_refill_buff_pool_lock(struct dp_soc *soc) { }
+
+static inline void dp_rx_refill_buff_pool_unlock(struct dp_soc *soc) { }
+
 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
 #endif /* _DP_RX_BUFFER_POOL_H_ */
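
The contract of the new pair: the trylock never spins, so if it loses
a race the flag stays false, dp_rx_refill_buff_pool_dequeue_nbuf()
reports no buffer, and the caller falls back to a fresh allocation;
unlock is then a no-op. A condensed standalone sketch of that contract
(pthread names again stand in for the QDF spinlock; illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct rbuf {
	struct rbuf *next;
};

struct refill_pool {
	pthread_mutex_t lock;	/* stands in for bufq_lock */
	bool initialized;
	bool in_refill_lock;	/* stands in for in_rx_refill_lock */
	struct rbuf *head;
	unsigned int len;
};

/* Take the pool lock for a burst, but never spin: losing the race
 * just means this burst skips the pool. */
static inline void refill_pool_lock(struct refill_pool *p)
{
	if (p->initialized && pthread_mutex_trylock(&p->lock) == 0)
		p->in_refill_lock = true;
}

/* Dequeue without taking the lock itself: valid only while the burst
 * holds it, which is exactly what in_refill_lock records. */
static inline struct rbuf *refill_pool_dequeue(struct refill_pool *p)
{
	struct rbuf *b;

	if (!p->in_refill_lock || !p->len)
		return NULL;	/* caller falls back to fresh allocation */
	b = p->head;
	p->head = b->next;
	b->next = NULL;
	p->len--;
	return b;
}

static inline void refill_pool_unlock(struct refill_pool *p)
{
	if (p->in_refill_lock) {
		pthread_mutex_unlock(&p->lock);
		p->in_refill_lock = false;
	}
}

Note that the dequeue path takes no lock of its own; correctness
relies on every dequeue being bracketed by this lock/unlock pair,
which is why checking bufq_len alone no longer suffices and the
in_rx_refill_lock flag gates it.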

dp/wifi3.0/dp_types.h

@@ -1144,6 +1144,7 @@ struct rx_refill_buff_pool {
 	qdf_spinlock_t bufq_lock;
 	uint32_t bufq_len;
 	uint32_t max_bufq_len;
+	bool in_rx_refill_lock;
 	bool is_initialized;
 };