qcacmn: Replenish more RX buffers when refill ring runs low

When an nbuf allocation failure happens, there is no retry scheme,
so the RX buffer count in the refill ring may shrink and never be
restored. This change replenishes extra RX buffers whenever the
ring's watermark drops below the critical low threshold.

Change-Id: I201b9e252ba08ba1bff47e0b5ec819a45f1b1ddf
CRs-Fixed: 3245915
Author:       Yu Tian (2022-04-25 18:54:32 +08:00)
Committed by: Madan Koyyalamudi
Parent:       f7a1c7e0c7
Commit:       ed3ba3ca44
3 changed files with 29 additions and 0 deletions
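
The core idea can be sketched in a few lines of standalone C. This is a simplified illustration, not the driver code: the refill_ring struct and the extra_descs_to_request() helper are hypothetical stand-ins; only CRITICAL_BUFFER_THRESHOLD and the occupancy arithmetic come from the diff below.

#include <stdint.h>
#include <stdio.h>

#define CRITICAL_BUFFER_THRESHOLD 64    /* matches the dp_rx.h hunk below */

/* Hypothetical stand-in for the refill ring state used in the diff. */
struct refill_ring {
    uint32_t num_entries;       /* total ring size */
    uint32_t num_entries_avail; /* empty slots awaiting buffers */
};

/*
 * Occupancy is num_entries - num_entries_avail; when fewer than
 * CRITICAL_BUFFER_THRESHOLD buffers are still posted, ask for a
 * top-up from the descriptor free list.
 */
static uint32_t extra_descs_to_request(const struct refill_ring *ring)
{
    uint32_t posted = ring->num_entries - ring->num_entries_avail;

    return posted < CRITICAL_BUFFER_THRESHOLD ? CRITICAL_BUFFER_THRESHOLD : 0;
}

int main(void)
{
    struct refill_ring ring = { .num_entries = 1024, .num_entries_avail = 1000 };

    /* Only 24 buffers posted, below the watermark: request 64 more. */
    printf("extra descriptors: %u\n", extra_descs_to_request(&ring));
    return 0;
}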


@@ -2689,6 +2689,7 @@ struct cdp_peer_telemetry_stats {
  * @map_err: Mapping failure
  * @x86_fail: x86 failures
  * @low_thresh_intrs: low threshold interrupts
+ * @free_list: RX descriptors moving back to free list
  * @rx_raw_pkts: Rx Raw Packets
  * @mesh_mem_alloc: Mesh Rx Stats Alloc fail
  * @tso_desc_cnt: TSO descriptors
@@ -2738,6 +2739,7 @@ struct cdp_pdev_stats {
         uint32_t map_err;
         uint32_t x86_fail;
         uint32_t low_thresh_intrs;
+        int32_t free_list;
     } replenish;
     uint32_t rx_raw_pkts;
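
A note on the new counter: it is decremented when descriptors are borrowed from the free list and incremented by the unfilled remainder (num_req_buffers - count) later in the diff, so the running value can legitimately dip negative, which is presumably why the field is a signed int32_t. A minimal model of that bookkeeping, with the example numbers invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t free_list = 0;

    uint32_t num_alloc_desc = 64;   /* descriptors borrowed from the free list */
    uint32_t num_req_buffers = 128; /* buffers requested this replenish pass */
    uint32_t count = 100;           /* nbufs actually posted to the ring */

    free_list -= (int32_t)num_alloc_desc;            /* DP_STATS_DEC in dp_rx.c */
    free_list += (int32_t)(num_req_buffers - count); /* DP_STATS_INC in dp_rx.c */

    printf("free_list = %d\n", free_list);           /* -36: net outstanding borrow */
    return 0;
}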


@@ -600,6 +600,9 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
     union dp_rx_desc_list_elem_t *next;
     QDF_STATUS ret;
     void *rxdma_srng;
+    union dp_rx_desc_list_elem_t *desc_list_append = NULL;
+    union dp_rx_desc_list_elem_t *tail_append = NULL;
+    union dp_rx_desc_list_elem_t *temp_list = NULL;

     rxdma_srng = dp_rxdma_srng->hal_srng;
@@ -633,6 +636,28 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
     } else if (num_entries_avail < num_req_buffers) {
         num_desc_to_free = num_req_buffers - num_entries_avail;
         num_req_buffers = num_entries_avail;
+    } else if ((*desc_list) &&
+               dp_rxdma_srng->num_entries - num_entries_avail <
+               CRITICAL_BUFFER_THRESHOLD) {
+        /* Append some free descriptors to tail */
+        num_alloc_desc =
+            dp_rx_get_free_desc_list(dp_soc, mac_id,
+                                     rx_desc_pool,
+                                     CRITICAL_BUFFER_THRESHOLD,
+                                     &desc_list_append,
+                                     &tail_append);
+
+        if (num_alloc_desc) {
+            temp_list = *desc_list;
+            *desc_list = desc_list_append;
+            tail_append->next = temp_list;
+            num_req_buffers += num_alloc_desc;
+
+            DP_STATS_DEC(dp_pdev,
+                         replenish.free_list,
+                         num_alloc_desc);
+        } else
+            dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
     }

     if (qdf_unlikely(!num_req_buffers)) {
@@ -736,6 +761,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
      * Therefore set replenish.pkts.bytes as 0.
      */
     DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
+    DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

 free_descs:
     DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
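
For readers skimming the splice in the hunk above: the borrowed descriptors are prepended to the caller's list by pointing the borrowed chain's tail at the old head. A self-contained toy version follows; the struct desc type and the example values are illustrative, not the driver's, but the two pointer moves mirror the diff exactly.

#include <stdio.h>

/* Toy descriptor node standing in for union dp_rx_desc_list_elem_t. */
struct desc {
    int id;
    struct desc *next;
};

/*
 * Splice the borrowed chain (extra_head..extra_tail) in front of *head,
 * the same pointer moves the hunk performs with desc_list_append,
 * tail_append and *desc_list.
 */
static void prepend_chain(struct desc **head, struct desc *extra_head,
                          struct desc *extra_tail)
{
    extra_tail->next = *head;   /* tail_append->next = temp_list;     */
    *head = extra_head;         /* *desc_list = desc_list_append;     */
}

int main(void)
{
    struct desc c = { 3, NULL }, b = { 2, &c };   /* caller's list: 2 -> 3 */
    struct desc y = { 1, NULL }, x = { 0, &y };   /* borrowed chain: 0 -> 1 */
    struct desc *list = &b;

    prepend_chain(&list, &x, &y);
    for (struct desc *d = list; d; d = d->next)
        printf("%d ", d->id);                     /* prints: 0 1 2 3 */
    printf("\n");
    return 0;
}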


@@ -1482,6 +1482,7 @@ dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
 }
 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */

+#define CRITICAL_BUFFER_THRESHOLD 64
 /*
  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  * called during dp rx initialization