qcacmn: Limit the REO cmd number in batch operate
MCL path adds the desc back to the freelist when FLUSH failed. This may cause the number of CMDs pending in the freelist to grow even larger than the REO_CMD_RING max size, leading to a CMD flood and leaving the REO HW in an unexpected condition. So it is necessary to limit the number of REO cmds issued in a batch operation.

Change-Id: Ib6b97f0693feb7bb5829bda4683248b495b6c7b3
CRs-Fixed: 2642516
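As a rough standalone illustration of the failure mode described above (the ring capacity and counts below are made-up placeholders, not the real qcacmn sizes): if every descriptor sitting in the freelist triggers a flush command in a single pass, the number of commands posted can exceed what the REO cmd ring can hold.

#include <stdint.h>
#include <stdio.h>

#define FAKE_REO_CMD_RING_ENTRIES 64U   /* placeholder ring capacity */

int main(void)
{
        /* grows every time a failed FLUSH puts a desc back on the freelist */
        uint32_t pending_descs = 300;

        if (pending_descs > FAKE_REO_CMD_RING_ENTRIES)
                printf("one pass would post %u cmds into a %u-entry ring\n",
                       (unsigned)pending_descs,
                       (unsigned)FAKE_REO_CMD_RING_ENTRIES);
        return 0;
}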
@@ -1848,6 +1848,7 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
         struct reo_desc_list_node *freedesc =
                 (struct reo_desc_list_node *)cb_ctxt;
         struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
+        unsigned long curr_ts = qdf_get_system_timestamp();
 
         if ((reo_status->fl_cache_status.header.status !=
                 HAL_REO_CMD_SUCCESS) &&
@@ -1860,7 +1861,8 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
                         freedesc->rx_tid.tid);
         }
         QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
-                "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
+                "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__,
+                curr_ts,
                 (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
         qdf_mem_unmap_nbytes_single(soc->osdev,
                 rx_tid->hw_qdesc_paddr,
@@ -2089,6 +2091,23 @@ static void dp_reo_desc_clean_up(struct dp_soc *soc,
                              (qdf_list_node_t *)desc);
 }
 
+/*
+ * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
+ * ring in aviod of REO hang
+ *
+ * @list_size: REO desc list size to be cleaned
+ */
+static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
+{
+        unsigned long curr_ts = qdf_get_system_timestamp();
+
+        if ((*list_size) > REO_DESC_FREELIST_SIZE) {
+                dp_err_log("%lu:freedesc number %d in freelist",
+                           curr_ts, *list_size);
+                /* limit the batch queue size */
+                *list_size = REO_DESC_FREELIST_SIZE;
+        }
+}
 #else
 /*
  * dp_reo_desc_clean_up() - If send cmd to REO inorder to flush
@@ -2108,6 +2127,16 @@ static void dp_reo_desc_clean_up(struct dp_soc *soc,
                 dp_reo_desc_free(soc, (void *)desc, reo_status);
         }
 }
+
+/*
+ * dp_reo_limit_clean_batch_sz() - Limit number REO CMD queued to cmd
+ * ring in aviod of REO hang
+ *
+ * @list_size: REO desc list size to be cleaned
+ */
+static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
+{
+}
 #endif
 
 /*
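The two hunks above define dp_reo_limit_clean_batch_sz() on both sides of an existing #ifdef (the guarding macro itself is not visible in these hunks): a real clamp in one configuration and an empty inline stub in the other, so the caller needs no conditional compilation of its own. A minimal sketch of that pattern, using placeholder macro names and a placeholder threshold:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FREELIST_SIZE 64U            /* placeholder threshold */

#ifdef EXAMPLE_BATCH_LIMIT_ENABLED           /* placeholder config macro */
/* enabled build: cap the batch at the freelist threshold */
static inline void example_limit_clean_batch_sz(uint32_t *list_size)
{
        if (*list_size > EXAMPLE_FREELIST_SIZE)
                *list_size = EXAMPLE_FREELIST_SIZE;
}
#else
/* disabled build: same symbol, empty body, the call site compiles unchanged */
static inline void example_limit_clean_batch_sz(uint32_t *list_size)
{
        (void)list_size;
}
#endif

int main(void)
{
        uint32_t list_size = 500;

        example_limit_clean_batch_sz(&list_size);
        printf("batch size after limit: %u\n", (unsigned)list_size);
        return 0;
}

Building with -DEXAMPLE_BATCH_LIMIT_ENABLED selects the clamping variant; without it the helper compiles away to nothing.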
@@ -2176,6 +2205,7 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
                 qdf_mem_zero(reo_status, sizeof(*reo_status));
                 reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
                 dp_reo_desc_free(soc, (void *)freedesc, reo_status);
+                DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
                 return;
         } else if (reo_status->rx_queue_status.header.status !=
                 HAL_REO_CMD_SUCCESS) {
@@ -2197,6 +2227,14 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
         qdf_list_insert_back_size(&soc->reo_desc_freelist,
                 (qdf_list_node_t *)freedesc, &list_size);
 
+        /* MCL path add the desc back to reo_desc_freelist when REO FLUSH
+         * failed. it may cause the number of REO queue pending in free
+         * list is even larger than REO_CMD_RING max size and lead REO CMD
+         * flood then cause REO HW in an unexpected condition. So it's
+         * needed to limit the number REO cmds in a batch operation.
+         */
+        dp_reo_limit_clean_batch_sz(&list_size);
+
         while ((qdf_list_peek_front(&soc->reo_desc_freelist,
                 (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
                 ((list_size >= REO_DESC_FREELIST_SIZE) ||
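To see how the clamp bounds the work done per callback, here is a deliberately simplified, self-contained model of the call-site flow above (a plain counter stands in for the qdf list and for the REO command send; EXAMPLE_FREELIST_SIZE is a placeholder for REO_DESC_FREELIST_SIZE, and the old-entry timestamp condition of the real loop is omitted): descriptors pile up on the freelist, the batch size is clamped, and at most that many flush commands are issued in one pass.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FREELIST_SIZE 64U   /* placeholder for REO_DESC_FREELIST_SIZE */

static void example_limit_clean_batch_sz(uint32_t *list_size)
{
        if (*list_size > EXAMPLE_FREELIST_SIZE)
                *list_size = EXAMPLE_FREELIST_SIZE;
}

int main(void)
{
        /* pretend 500 descs were re-queued after failed FLUSH commands */
        uint32_t list_size = 500;
        uint32_t cmds_sent = 0;

        example_limit_clean_batch_sz(&list_size);

        /* drain loop: one flush command per desc, bounded by the clamped size */
        while (list_size > 0) {
                cmds_sent++;
                list_size--;
        }
        printf("flush commands issued in this pass: %u\n", (unsigned)cmds_sent);
        return 0;
}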
@@ -793,6 +793,8 @@ struct dp_soc_stats {
                 uint32_t hal_rxdma_err_dup;
                 /* REO cmd send fail/requeue count */
                 uint32_t reo_cmd_send_fail;
+                /* REO cmd send drain count */
+                uint32_t reo_cmd_send_drain;
                 /* RX msdu drop count due to scatter */
                 uint32_t scatter_msdu;
                 /* RX msdu drop count due to invalid cookie */