qcacmn: Lock rx_desc_pool to collect nbufs and delete later
When multi page alloc is activated, the spinlock for rx_desc_pool is
held for more than 2 seconds, resulting in a QDF_BUG. The major share
of that time is spent unmapping the nbufs. To fix this, hold the
rx_desc_pool lock only while collecting the nbufs from the rx
descriptors into a list; unmap and free the nbufs after releasing the
lock.

Change-Id: Iff2078a0de56b51712e2f9a7c5ace7a959e2445d
CRs-Fixed: 2779498
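In outline, the fix is a classic two-phase teardown: detach everything from the shared structure while the lock is held, then do the slow per-buffer work with the lock dropped. A minimal standalone sketch of that pattern (plain C with a pthread mutex standing in for the QDF spinlock; buf_pool, collect_bufs and cleanup_bufs are hypothetical names, not driver APIs):

	#include <pthread.h>
	#include <stdlib.h>

	struct buf {
		struct buf *next;
	};

	struct buf_pool {
		pthread_mutex_t lock;
		struct buf *in_use;	/* buffers owned by the pool */
	};

	/* Phase 1: hold the lock only long enough to detach the buffers. */
	static struct buf *collect_bufs(struct buf_pool *pool)
	{
		struct buf *list;

		pthread_mutex_lock(&pool->lock);
		list = pool->in_use;
		pool->in_use = NULL;
		pthread_mutex_unlock(&pool->lock);

		return list;
	}

	/* Phase 2: expensive per-buffer teardown, with no lock held. */
	static void cleanup_bufs(struct buf *list)
	{
		struct buf *next;

		while (list) {
			next = list->next;
			free(list);	/* stands in for unmap + free */
			list = next;
		}
	}

	int main(void)
	{
		struct buf_pool pool = { PTHREAD_MUTEX_INITIALIZER, NULL };
		struct buf *b = calloc(1, sizeof(*b));

		if (b) {
			b->next = pool.in_use;
			pool.in_use = b;
		}
		cleanup_bufs(collect_bufs(&pool));
		return 0;
	}

The lock hold time then scales with a few pointer moves per buffer rather than with the cost of a DMA unmap per buffer.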
@@ -846,6 +846,12 @@ void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
 		       uint16_t peer_id, uint8_t tid);
 
+#define DP_RX_HEAD_APPEND(head, elem) \
+	do {                                       \
+		qdf_nbuf_set_next((elem), (head)); \
+		(head) = (elem);                   \
+	} while (0)
+
 #define DP_RX_LIST_APPEND(head, tail, elem) \
 	do {                                 \
@@ -148,22 +148,21 @@ union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
 			rx_desc_pool->elem_size * offset;
 }
 
-static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
-					 struct rx_desc_pool *rx_desc_pool)
+static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
+					  struct rx_desc_pool *rx_desc_pool,
+					  qdf_nbuf_t *nbuf_unmap_list,
+					  qdf_nbuf_t *nbuf_free_list)
 {
 	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
 	union dp_rx_desc_list_elem_t *rx_desc_elem;
 	struct dp_rx_desc *rx_desc;
-	qdf_nbuf_t nbuf;
 
-	if (qdf_unlikely(!(rx_desc_pool->
-			desc_pages.cacheable_pages))) {
+	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
 		qdf_err("No pages found on this desc pool");
 		return QDF_STATUS_E_INVAL;
 	}
 	num_desc = rx_desc_pool->pool_size;
-	num_desc_per_page =
-		rx_desc_pool->desc_pages.num_element_per_page;
+	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
 	for (i = 0; i < num_desc; i++) {
 		page_id = i / num_desc_per_page;
 		offset = i % num_desc_per_page;
@@ -171,47 +170,72 @@ static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
 		rx_desc = &rx_desc_elem->rx_desc;
 		dp_rx_desc_free_dbg_info(rx_desc);
 		if (rx_desc->in_use) {
-			nbuf = rx_desc->nbuf;
 			if (!rx_desc->unmapped) {
-				dp_ipa_handle_rx_buf_smmu_mapping(
-							soc, nbuf,
-							rx_desc_pool->buf_size,
-							false);
-				qdf_nbuf_unmap_nbytes_single(
-							soc->osdev,
-							rx_desc->nbuf,
-							QDF_DMA_BIDIRECTIONAL,
-							rx_desc_pool->buf_size);
+				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
+						  rx_desc->nbuf);
 				rx_desc->unmapped = 1;
+			} else {
+				DP_RX_HEAD_APPEND(*nbuf_free_list,
+						  rx_desc->nbuf);
 			}
-			qdf_nbuf_free(nbuf);
 		}
 	}
 
 	return QDF_STATUS_SUCCESS;
 }
 
+static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
+				    qdf_nbuf_t nbuf_unmap_list,
+				    qdf_nbuf_t nbuf_free_list,
+				    uint16_t buf_size)
+{
+	qdf_nbuf_t nbuf = nbuf_unmap_list;
+	qdf_nbuf_t next;
+
+	while (nbuf) {
+		next = nbuf->next;
+		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
+						      false))
+			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
+		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
+					     QDF_DMA_BIDIRECTIONAL, buf_size);
+		qdf_nbuf_free(nbuf);
+		nbuf = next;
+	}
+
+	nbuf = nbuf_free_list;
+	while (nbuf) {
+		next = nbuf->next;
+		qdf_nbuf_free(nbuf);
+		nbuf = next;
+	}
+}
+
 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
 				   struct rx_desc_pool *rx_desc_pool)
 {
-	QDF_STATUS qdf_status;
+	qdf_nbuf_t nbuf_unmap_list = NULL;
+	qdf_nbuf_t nbuf_free_list = NULL;
 
 	qdf_spin_lock_bh(&rx_desc_pool->lock);
-	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
-	if (QDF_IS_STATUS_SUCCESS(qdf_status))
-		dp_rx_desc_pool_free(soc, rx_desc_pool);
-
+	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
+				&nbuf_unmap_list, &nbuf_free_list);
 	qdf_spin_unlock_bh(&rx_desc_pool->lock);
+	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
+				rx_desc_pool->buf_size);
 	qdf_spinlock_destroy(&rx_desc_pool->lock);
 }
 
 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
 			  struct rx_desc_pool *rx_desc_pool)
 {
+	qdf_nbuf_t nbuf_unmap_list = NULL;
+	qdf_nbuf_t nbuf_free_list = NULL;
+
 	qdf_spin_lock_bh(&rx_desc_pool->lock);
-	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
+	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
+				&nbuf_unmap_list, &nbuf_free_list);
 	qdf_spin_unlock_bh(&rx_desc_pool->lock);
+	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
+				rx_desc_pool->buf_size);
 }
 
 void dp_rx_desc_pool_free(struct dp_soc *soc,
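Read without the diff markers, the reworked dp_rx_desc_nbuf_free shows the resulting flow; this is assembled from the new-side lines above, with comments added:

	void dp_rx_desc_nbuf_free(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool)
	{
		qdf_nbuf_t nbuf_unmap_list = NULL;
		qdf_nbuf_t nbuf_free_list = NULL;

		/* Lock held only while moving nbufs onto the local lists. */
		qdf_spin_lock_bh(&rx_desc_pool->lock);
		dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
					&nbuf_unmap_list, &nbuf_free_list);
		qdf_spin_unlock_bh(&rx_desc_pool->lock);

		/* The slow SMMU/DMA unmap and free now run unlocked. */
		dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
					rx_desc_pool->buf_size);
	}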