@@ -2329,10 +2329,13 @@ static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
  * @desc: desc with resend update cmd flag set
  * @rx_tid: Desc RX tid associated with update cmd for resetting
  * valid field to 0 in h/w
+ *
+ * Return: QDF status
  */
-static void dp_resend_update_reo_cmd(struct dp_soc *soc,
-				     struct reo_desc_list_node *desc,
-				     struct dp_rx_tid *rx_tid)
+static QDF_STATUS
+dp_resend_update_reo_cmd(struct dp_soc *soc,
+			 struct reo_desc_list_node *desc,
+			 struct dp_rx_tid *rx_tid)
 {
 	struct hal_reo_cmd_params params;
 
@@ -2361,7 +2364,10 @@ static void dp_resend_update_reo_cmd(struct dp_soc *soc,
 				  (qdf_list_node_t *)desc);
 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
+		return QDF_STATUS_E_FAILURE;
 	}
+
+	return QDF_STATUS_SUCCESS;
 }
 
 /*
@@ -2382,6 +2388,7 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
 	unsigned long curr_ts = qdf_get_system_timestamp();
 	uint32_t desc_size, tot_desc_size;
 	struct hal_reo_cmd_params params;
+	bool flush_failure = false;
 
 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
 		qdf_mem_zero(reo_status, sizeof(*reo_status));
@@ -2392,11 +2399,10 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
 	} else if (reo_status->rx_queue_status.header.status !=
 		   HAL_REO_CMD_SUCCESS) {
 		/* Should not happen normally. Just print error for now */
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
-			  __func__,
-			  reo_status->rx_queue_status.header.status,
-			  freedesc->rx_tid.tid);
+		dp_info_rl("%s: Rx tid HW desc deletion failed(%d): tid %d",
+			   __func__,
+			   reo_status->rx_queue_status.header.status,
+			   freedesc->rx_tid.tid);
 	}
 
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
@@ -2431,13 +2437,19 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
 
 		/* First process descs with resend_update_reo_cmd set */
 		if (desc->resend_update_reo_cmd) {
-			dp_resend_update_reo_cmd(soc, desc, rx_tid);
-			continue;
+			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
+			    QDF_STATUS_SUCCESS)
+				break;
+			else
+				continue;
 		}
 
 		/* Flush and invalidate REO descriptor from HW cache: Base and
 		 * extension descriptors should be flushed separately */
-		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
+		if (desc->pending_ext_desc_size)
+			tot_desc_size = desc->pending_ext_desc_size;
+		else
+			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
 		/* Get base descriptor size by passing non-qos TID */
 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
 						   DP_NON_QOS_TID);
@@ -2456,13 +2468,22 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
 							  &params,
 							  NULL,
 							  NULL)) {
-				dp_err_rl("fail to send CMD_CACHE_FLUSH:"
-					  "tid %d desc %pK", rx_tid->tid,
-					  (void *)(rx_tid->hw_qdesc_paddr));
-				DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
+				dp_info_rl("fail to send CMD_CACHE_FLUSH:"
+					   "tid %d desc %pK", rx_tid->tid,
+					   (void *)(rx_tid->hw_qdesc_paddr));
+				desc->pending_ext_desc_size = tot_desc_size +
+							      desc_size;
+				dp_reo_desc_clean_up(soc, desc, reo_status);
+				flush_failure = true;
+				break;
 			}
 		}
 
+		if (flush_failure)
+			break;
+		else
+			desc->pending_ext_desc_size = desc_size;
+
 		/* Flush base descriptor */
 		qdf_mem_zero(&params, sizeof(params));
 		params.std.need_status = 1;
@@ -2486,10 +2507,11 @@ void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
 			 * In case of MCL path add the desc back to the free
 			 * desc list and defer deletion.
 			 */
-			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
+			dp_info_rl("%s: fail to send REO cmd to flush cache: tid %d",
 				   __func__, rx_tid->tid);
 			dp_reo_desc_clean_up(soc, desc, &reo_status);
 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
+			break;
 		}
 	}
 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);