qcacmn: replenish complete rx_refill ring in UMAC reset
During a UMAC reset, if the in_use buffers are sufficient to fill the complete RX refill ring, only a third of the ring is replenished at pre-reset. With low-threshold interrupts disabled, the ring might then never be refilled. Refill the complete ring at post-reset.

Change-Id: I0e4ed942120619ef357bc91f8cbbab8c1fd1b06e
CRs-Fixed: 3628996
Committed by: Rahul Choudhary
Parent: c779207718
Commit: 47296136e3
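For context, the check this change relaxes lives in the replenish helpers touched below: the low-threshold path only refills the ring when at least three quarters of its entries are free, so a ring left partially filled at pre-reset can stay that way if low-threshold interrupts are disabled. A minimal sketch of that decision follows; the struct and function names are hypothetical, only the 3/4 threshold and the force_replenish flag mirror the diff.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified stand-in for the SRNG bookkeeping used below. */
struct ring_state {
	uint32_t num_entries;       /* total ring size */
	uint32_t num_entries_avail; /* currently unfilled entries */
};

/*
 * Low-threshold replenish guard: without force_replenish, refill is skipped
 * unless at least 3/4 of the ring is empty. With force_replenish (the
 * post-reset case), the occupancy check is bypassed and the ring is topped
 * up with whatever entries are available.
 */
static bool should_replenish(const struct ring_state *ring, bool force_replenish)
{
	if (force_replenish)
		return true;

	return ring->num_entries_avail >= (ring->num_entries * 3) / 4;
}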
@@ -1974,8 +1974,6 @@ int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
 		}
 
 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
-			union dp_rx_desc_list_elem_t *desc_list = NULL;
-			union dp_rx_desc_list_elem_t *tail = NULL;
 			struct dp_srng *rx_refill_buf_ring;
 			struct rx_desc_pool *rx_desc_pool;
 
@@ -1990,13 +1988,11 @@ int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
 			intr_stats->num_host2rxdma_ring_masks++;
 
 			if (!rx_refill_lt_disable)
-				dp_rx_buffers_lt_replenish_simple(soc,
-								  mac_for_pdev,
-								  rx_refill_buf_ring,
-								  rx_desc_pool,
-								  0,
-								  &desc_list,
-								  &tail);
+				dp_rx_buffers_lt_replenish_simple
+							(soc, mac_for_pdev,
+							 rx_refill_buf_ring,
+							 rx_desc_pool,
+							 false);
 		}
 	}
 
@@ -11936,6 +11932,7 @@ static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
 {
 	QDF_STATUS status;
 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
+	uint8_t mac_id;
 
 	soc->umac_reset_ctx.nbuf_list = NULL;
 
@@ -11959,6 +11956,19 @@ static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
 		nbuf_list = nbuf;
 	}
 
+	/*
+	 * at pre-reset if in_use descriptors are not sufficient we replenish
+	 * only 1/3 of the ring. Try to replenish full ring here.
+	 */
+	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
+		struct dp_srng *dp_rxdma_srng =
+					&soc->rx_refill_buf_ring[mac_id];
+		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
+
+		dp_rx_buffers_lt_replenish_simple(soc, mac_id, dp_rxdma_srng,
+						  rx_desc_pool, true);
+	}
+
 	dp_umac_reset_info("Umac reset done on soc %pK\n trigger start : %u us "
 			   "trigger done : %u us prereset : %u us\n"
 			   "postreset : %u us \n postreset complete: %u us \n",
@@ -385,7 +385,8 @@ dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
 QDF_STATUS
 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
 				    struct dp_srng *dp_rxdma_srng,
-				    struct rx_desc_pool *rx_desc_pool)
+				    struct rx_desc_pool *rx_desc_pool,
+				    bool force_replenish)
 {
 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
 	uint32_t count;
@@ -421,8 +422,8 @@ __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
 	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
 		    soc, num_entries_avail);
 
-	if (qdf_unlikely(num_entries_avail <
-			 ((dp_rxdma_srng->num_entries * 3) / 4))) {
+	if (qdf_unlikely(!force_replenish && (num_entries_avail <
+			 ((dp_rxdma_srng->num_entries * 3) / 4)))) {
 		hal_srng_access_end(soc->hal_soc, rxdma_srng);
 		return QDF_STATUS_E_FAILURE;
 	}
@@ -915,7 +916,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 				  uint32_t num_req_buffers,
 				  union dp_rx_desc_list_elem_t **desc_list,
 				  union dp_rx_desc_list_elem_t **tail,
-				  bool req_only, const char *func_name)
+				  bool req_only, bool force_replenish,
+				  const char *func_name)
 {
 	uint32_t num_alloc_desc;
 	uint16_t num_desc_to_free = 0;
@@ -959,8 +961,9 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
 			 dp_soc, num_entries_avail);
 
-	if (!req_only && !(*desc_list) && (num_entries_avail >
-		((dp_rxdma_srng->num_entries * 3) / 4))) {
+	if (!req_only && !(*desc_list) &&
+	    (force_replenish || (num_entries_avail >
+	     ((dp_rxdma_srng->num_entries * 3) / 4)))) {
 		num_req_buffers = num_entries_avail;
 		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
 	} else if (num_entries_avail < num_req_buffers) {
@@ -219,7 +219,7 @@ struct dp_rx_desc {
 				  num_buffers, desc_list, tail, req_only) \
 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
 				  num_buffers, desc_list, tail, req_only, \
-				  __func__)
+				  false, __func__)
 
 #ifdef WLAN_SUPPORT_RX_FISA
 /**
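The macro hunk above keeps the existing dp_rx_buffers_replenish() call shape and simply forwards false for the new argument, so current callers need no changes. A small, generic illustration of that forwarding pattern; the names here are hypothetical, not the driver's:

#include <stdbool.h>

/* Hypothetical worker that grew an extra 'force' parameter. */
static int __replenish(int mac_id, bool req_only, bool force,
		       const char *caller)
{
	(void)req_only; (void)force; (void)caller;
	return mac_id >= 0 ? 0 : -1;
}

/*
 * Convenience macro keeps the old two-argument shape: existing call sites
 * compile unchanged and implicitly pass force = false, while new paths can
 * call the worker directly with force = true.
 */
#define replenish(mac_id, req_only) \
	__replenish((mac_id), (req_only), false, __func__)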
@@ -1675,6 +1675,9 @@ dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
  * interrupt.
  * @tail: tail of descs list
  * @req_only: If true don't replenish more than req buffers
+ * @force_replenish: replenish full ring without limit check this
+ *                   this field will be considered only when desc_list
+ *                   is NULL and req_only is false
  * @func_name: name of the caller function
  *
  * Return: return success or failure
@@ -1686,6 +1689,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 				  union dp_rx_desc_list_elem_t **desc_list,
 				  union dp_rx_desc_list_elem_t **tail,
 				  bool req_only,
+				  bool force_replenish,
 				  const char *func_name);
 
 /**
@@ -1754,13 +1758,15 @@ __dp_rx_comp2refill_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
  * @mac_id: mac_id which is one of 3 mac_ids
  * @dp_rxdma_srng: dp rxdma circular ring
  * @rx_desc_pool: Pointer to free Rx descriptor pool
+ * @force_replenish: Force replenish the ring fully
  *
  * Return: return success or failure
  */
 QDF_STATUS
 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 				    struct dp_srng *dp_rxdma_srng,
-				    struct rx_desc_pool *rx_desc_pool);
+				    struct rx_desc_pool *rx_desc_pool,
+				    bool force_replenish);
 
 /**
  * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs
@@ -2585,12 +2591,11 @@ static inline
 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
 				       struct dp_srng *rxdma_srng,
 				       struct rx_desc_pool *rx_desc_pool,
-				       uint32_t num_req_buffers,
-				       union dp_rx_desc_list_elem_t **desc_list,
-				       union dp_rx_desc_list_elem_t **tail)
+				       bool force_replenish)
 {
 	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
-					    rx_desc_pool);
+					    rx_desc_pool,
+					    force_replenish);
 }
 
 #ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
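Both variants of the dp_rx_buffers_lt_replenish_simple() wrapper (the hunk above and the one below) now take only the force_replenish flag and keep the descriptor-list plumbing internal. A hypothetical sketch of that thin-wrapper shape, not the driver code itself:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical internal worker with the full parameter list. */
static int __refill(int mac_id, unsigned int num_req, void **desc_list,
		    void **tail, bool force)
{
	(void)num_req; (void)desc_list; (void)tail; (void)force;
	return mac_id >= 0 ? 0 : -1;
}

/*
 * Thin inline wrapper: callers pick the mac and whether to force a full
 * refill; the num_req/desc_list/tail bookkeeping stays an internal default.
 */
static inline int refill_simple(int mac_id, bool force)
{
	return __refill(mac_id, 0, NULL, NULL, force);
}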
@@ -2727,12 +2732,11 @@ static inline
 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
 				       struct dp_srng *rxdma_srng,
 				       struct rx_desc_pool *rx_desc_pool,
-				       uint32_t num_req_buffers,
-				       union dp_rx_desc_list_elem_t **desc_list,
-				       union dp_rx_desc_list_elem_t **tail)
+				       bool force_replenish)
 {
-	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
-				num_req_buffers, desc_list, tail, false);
+	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
+				  0, NULL, NULL, false, force_replenish,
+				  __func__);
 }
 
 static inline