qcacmn: minimize lock contention in Rx replenish

Changes the dp_rx_buffers_no_map_replenish API to
minimize contention on the RX refill ring lock by
allocating and cache-syncing all nbufs before the
lock is taken, so only ring writes run under it.

Change-Id: Ie55f034c6d01abbab21707460cd0f20548b35ba2
CRs-Fixed: 3313886
Author:       Chaithanya Garrepalli
Date:         2022-10-17 01:08:31 +05:30
Committed by: Madan Koyyalamudi
Parent:       2a6561273c
Commit:       682c7e09d1


@@ -395,6 +395,9 @@ __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
 	union dp_rx_desc_list_elem_t *next;
 	void *rxdma_srng;
 	qdf_nbuf_t nbuf;
+	qdf_nbuf_t nbuf_next;
+	qdf_nbuf_t nbuf_head = NULL;
+	qdf_nbuf_t nbuf_tail = NULL;
 	qdf_dma_addr_t paddr;

 	rxdma_srng = dp_rxdma_srng->hal_srng;
@@ -411,28 +414,40 @@ __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
 		return QDF_STATUS_E_FAILURE;
 	}

-	dp_rx_debug("%pK: requested %d buffers for replenish",
-		    soc, num_req_buffers);
-
-	hal_srng_access_start(soc->hal_soc, rxdma_srng);
-
+	/* Allocate required number of nbufs */
 	for (count = 0; count < num_req_buffers; count++) {
-		next = (*desc_list)->next;
-		qdf_prefetch(next);
-
-		rxdma_ring_entry = (struct dp_buffer_addr_info *)
-				hal_srng_src_peek(soc->hal_soc, rxdma_srng);
-
-		if (qdf_unlikely(!rxdma_ring_entry))
-			break;
-
 		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
 		if (qdf_unlikely(!nbuf)) {
 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
+			/* Update num_req_buffers to nbufs allocated count */
+			num_req_buffers = count;
 			break;
 		}

 		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
 					       rx_desc_pool->buf_size);
+		QDF_NBUF_CB_PADDR(nbuf) = paddr;
+		DP_RX_LIST_APPEND(nbuf_head,
+				  nbuf_tail,
+				  nbuf);
+	}
+	qdf_dsb();
+
+	nbuf = nbuf_head;
+	hal_srng_access_start(soc->hal_soc, rxdma_srng);
+
+	for (count = 0; count < num_req_buffers; count++) {
+		next = (*desc_list)->next;
+		nbuf_next = nbuf->next;
+		qdf_prefetch(next);
+
+		rxdma_ring_entry = (struct dp_buffer_addr_info *)
+			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
+		if (!rxdma_ring_entry)
+			break;
+
 		(*desc_list)->rx_desc.nbuf = nbuf;
 		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
 		(*desc_list)->rx_desc.unmapped = 0;
@@ -444,14 +459,13 @@ __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
 		(*desc_list)->rx_desc.in_err_state = 0;

 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
-					     paddr,
+					     QDF_NBUF_CB_PADDR(nbuf),
 					     (*desc_list)->rx_desc.cookie,
 					     rx_desc_pool->owner);

 		*desc_list = next;
-		hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
+		nbuf = nbuf_next;
 	}
-	qdf_dsb();
 	hal_srng_access_end(soc->hal_soc, rxdma_srng);

 	/* No need to count the number of bytes received during replenish.
@@ -465,6 +479,12 @@ __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
 	if (*desc_list)
 		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
 						 mac_id, rx_desc_pool);

+	while (nbuf) {
+		nbuf_next = nbuf->next;
+		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
+		qdf_nbuf_free(nbuf);
+		nbuf = nbuf_next;
+	}
 	return QDF_STATUS_SUCCESS;
 }
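Two details of the diff are worth noting. The allocation pass threads the new buffers into a singly linked list with DP_RX_LIST_APPEND, which is what lets the fill pass and the trailing cleanup loop walk nbuf->next; a minimal head/tail append in that style is sketched below (an illustration only, the actual macro is defined in dp_rx.h). Also, the single qdf_dsb() now executes after the allocation pass and before hal_srng_access_start(), so the barrier that dp_rx_nbuf_sync_no_dsb() defers is issued once for all buffers and, unlike before, outside the locked region.

#include <stddef.h>

/* Stand-in for qdf_nbuf_t; only the next pointer matters here. */
struct buf {
	struct buf *next;
};

/*
 * Minimal head/tail list append in the style the diff relies on.
 * A sketch only - see dp_rx.h for the actual DP_RX_LIST_APPEND.
 */
#define LIST_APPEND(head, tail, elem)          \
	do {                                   \
		if (!(head))                   \
			(head) = (elem);       \
		else                           \
			(tail)->next = (elem); \
		(tail) = (elem);               \
		(tail)->next = NULL;           \
	} while (0)

/* Build the list in pass one; pass two walks head->next. */
static struct buf *build_list(struct buf *items, int n)
{
	struct buf *head = NULL, *tail = NULL;

	for (int i = 0; i < n; i++)
		LIST_APPEND(head, tail, &items[i]);
	return head;
}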