qcacmn: Avoid per-packet Rx refill buffer pool lock
Instead of acquiring and releasing the Rx refill buffer pool lock for every packet, acquire the lock once and release it after a burst of nbuf replenishment. Change-Id: I778cb9702d31d4b8911cca4e7f06f12c50ef6df9 CRs-Fixed: 2884208
This commit is contained in:

committed by
snandini

parent
58b2f08534
commit
c9cec76270
@@ -303,6 +303,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
|
|||||||
dp_soc, num_req_buffers);
|
dp_soc, num_req_buffers);
|
||||||
|
|
||||||
hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
|
hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
|
||||||
|
|
||||||
num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
|
num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
|
||||||
rxdma_srng,
|
rxdma_srng,
|
||||||
sync_hw_ptr);
|
sync_hw_ptr);
|
||||||
@@ -349,6 +350,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
|
|||||||
|
|
||||||
count = 0;
|
count = 0;
|
||||||
|
|
||||||
|
dp_rx_refill_buff_pool_lock(dp_soc);
|
||||||
|
|
||||||
while (count < num_req_buffers) {
|
while (count < num_req_buffers) {
|
||||||
/* Flag is set while pdev rx_desc_pool initialization */
|
/* Flag is set while pdev rx_desc_pool initialization */
|
||||||
if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
|
if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
|
||||||
@@ -405,6 +408,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dp_rx_refill_buff_pool_unlock(dp_soc);
|
||||||
|
|
||||||
hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
|
hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
|
||||||
|
|
||||||
dp_rx_schedule_refill_thread(dp_soc);
|
dp_rx_schedule_refill_thread(dp_soc);
|
||||||
|
@@ -195,15 +195,13 @@ static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
|
|||||||
qdf_nbuf_t nbuf = NULL;
|
qdf_nbuf_t nbuf = NULL;
|
||||||
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||||
|
|
||||||
if (!buff_pool->is_initialized || !buff_pool->bufq_len)
|
if (!buff_pool->in_rx_refill_lock || !buff_pool->bufq_len)
|
||||||
return nbuf;
|
return nbuf;
|
||||||
|
|
||||||
qdf_spin_lock_bh(&buff_pool->bufq_lock);
|
|
||||||
nbuf = buff_pool->buf_head;
|
nbuf = buff_pool->buf_head;
|
||||||
buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
|
buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
|
||||||
qdf_nbuf_set_next(nbuf, NULL);
|
qdf_nbuf_set_next(nbuf, NULL);
|
||||||
buff_pool->bufq_len--;
|
buff_pool->bufq_len--;
|
||||||
qdf_spin_unlock_bh(&buff_pool->bufq_lock);
|
|
||||||
|
|
||||||
return nbuf;
|
return nbuf;
|
||||||
}
|
}
|
||||||
@@ -219,7 +217,7 @@ dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
|
|||||||
qdf_nbuf_t nbuf;
|
qdf_nbuf_t nbuf;
|
||||||
|
|
||||||
nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
|
nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
|
||||||
if (nbuf) {
|
if (qdf_likely(nbuf)) {
|
||||||
DP_STATS_INC(dp_pdev,
|
DP_STATS_INC(dp_pdev,
|
||||||
rx_refill_buff_pool.num_bufs_allocated, 1);
|
rx_refill_buff_pool.num_bufs_allocated, 1);
|
||||||
return nbuf;
|
return nbuf;
|
||||||
|
@@ -109,6 +109,34 @@ static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc)
|
|||||||
soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(dp_soc_to_cdp_soc_t(soc));
|
soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(dp_soc_to_cdp_soc_t(soc));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dp_rx_refill_buff_pool_lock() - Acquire Rx refill buff pool lock
|
||||||
|
* @soc: SoC handle
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static inline void dp_rx_refill_buff_pool_lock(struct dp_soc *soc)
|
||||||
|
{
|
||||||
|
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||||
|
|
||||||
|
if (buff_pool->is_initialized &&
|
||||||
|
qdf_spin_trylock_bh(&buff_pool->bufq_lock))
|
||||||
|
buff_pool->in_rx_refill_lock = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dp_rx_refill_buff_pool_unlock() - Release Rx refill buff pool lock
|
||||||
|
* @soc: SoC handle
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static inline void dp_rx_refill_buff_pool_unlock(struct dp_soc *soc)
|
||||||
|
{
|
||||||
|
struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
|
||||||
|
|
||||||
|
if (buff_pool->in_rx_refill_lock) {
|
||||||
|
qdf_spin_unlock_bh(&buff_pool->bufq_lock);
|
||||||
|
buff_pool->in_rx_refill_lock = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
#else
|
#else
|
||||||
/**
|
/**
|
||||||
* dp_rx_buffer_pool_init() - Initialize emergency buffer pool
|
* dp_rx_buffer_pool_init() - Initialize emergency buffer pool
|
||||||
@@ -206,5 +234,10 @@ dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }
|
static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }
|
||||||
|
|
||||||
|
/* No-op stub when WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL is disabled */
static inline void dp_rx_refill_buff_pool_lock(struct dp_soc *soc) { }
|
||||||
|
|
||||||
|
/* No-op stub when WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL is disabled */
static inline void dp_rx_refill_buff_pool_unlock(struct dp_soc *soc) { }
|
||||||
|
|
||||||
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
|
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
|
||||||
#endif /* _DP_RX_BUFFER_POOL_H_ */
|
#endif /* _DP_RX_BUFFER_POOL_H_ */
|
||||||
|
@@ -1144,6 +1144,7 @@ struct rx_refill_buff_pool {
|
|||||||
qdf_spinlock_t bufq_lock;
|
qdf_spinlock_t bufq_lock;
|
||||||
uint32_t bufq_len;
|
uint32_t bufq_len;
|
||||||
uint32_t max_bufq_len;
|
uint32_t max_bufq_len;
|
||||||
|
bool in_rx_refill_lock;
|
||||||
bool is_initialized;
|
bool is_initialized;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
Reference in New Issue
Block a user