@@ -72,6 +72,7 @@ dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
 #ifdef DP_RX_MSDU_DONE_FAIL_HISTORY
 static inline void
 dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
+				  struct dp_rx_desc *rx_desc,
 				  qdf_nbuf_t nbuf)
 {
 	struct dp_msdu_done_fail_entry *entry;
@@ -84,10 +85,16 @@ dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
 			  DP_MSDU_DONE_FAIL_HIST_MAX);
 	entry = &soc->msdu_done_fail_hist->entry[idx];
 	entry->paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
+
+	if (rx_desc)
+		entry->sw_cookie = rx_desc->cookie;
+	else
+		entry->sw_cookie = 0xDEAD;
 }
 #else
 static inline void
 dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
+				  struct dp_rx_desc *rx_desc,
 				  qdf_nbuf_t nbuf)
 {
 }
@@ -192,6 +199,84 @@ dp_rx_wds_learn(struct dp_soc *soc,
 }
 #endif
 
+#ifdef DP_RX_PEEK_MSDU_DONE_WAR
+static inline int dp_rx_war_peek_msdu_done(struct dp_soc *soc,
+					   struct dp_rx_desc *rx_desc)
+{
+	uint8_t *rx_tlv_hdr;
+
+	qdf_nbuf_sync_for_cpu(soc->osdev, rx_desc->nbuf, QDF_DMA_FROM_DEVICE);
+	rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
+
+	return hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr);
+}
+
+/**
+ * dp_rx_delink_n_rel_rx_desc() - unmap & free the nbuf in the rx_desc
+ * @soc: DP SoC handle
+ * @rx_desc: rx_desc handle of the nbuf to be unmapped & freed
+ * @reo_ring_num: REO_RING_NUM corresponding to the REO for which the
+ *		  bottom half is being serviced.
+ *
+ * Return: None
+ */
+static inline void
+dp_rx_delink_n_rel_rx_desc(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
+			   uint8_t reo_ring_num)
+{
+	if (!rx_desc)
+		return;
+
+	dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
+	dp_rx_nbuf_free(rx_desc->nbuf);
+	/*
+	 * RX_DESC flags:
+	 * in_use = 0 will be set when this rx_desc is added to local freelist
+	 * unmapped = 1 will be set by dp_rx_nbuf_unmap
+	 * in_err_state = 0 will be set during replenish
+	 * has_reuse_nbuf need not be touched.
+	 * msdu_done_fail = 0 should be set here ..!!
+	 */
+	rx_desc->msdu_done_fail = 0;
+}
+
+static inline struct dp_rx_desc *
+dp_rx_war_store_msdu_done_fail_desc(struct dp_soc *soc,
+				    struct dp_rx_desc *rx_desc,
+				    uint8_t reo_ring_num)
+{
+	struct dp_rx_msdu_done_fail_desc_list *msdu_done_fail_desc_list =
+						&soc->msdu_done_fail_desc_list;
+	struct dp_rx_desc *old_rx_desc;
+	uint32_t idx;
+
+	idx = dp_get_next_index(&msdu_done_fail_desc_list->index,
+				DP_MSDU_DONE_FAIL_DESCS_MAX);
+
+	old_rx_desc = msdu_done_fail_desc_list->msdu_done_fail_descs[idx];
+	dp_rx_delink_n_rel_rx_desc(soc, old_rx_desc, reo_ring_num);
+
+	msdu_done_fail_desc_list->msdu_done_fail_descs[idx] = rx_desc;
+
+	return old_rx_desc;
+}
+
+#else
+static inline int dp_rx_war_peek_msdu_done(struct dp_soc *soc,
+					   struct dp_rx_desc *rx_desc)
+{
+	return 1;
+}
+
+static inline struct dp_rx_desc *
+dp_rx_war_store_msdu_done_fail_desc(struct dp_soc *soc,
+				    struct dp_rx_desc *rx_desc,
+				    uint8_t reo_ring_num)
+{
+	return NULL;
+}
+#endif
+
 uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
 			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
 			  uint32_t quota)
@@ -446,6 +531,29 @@ more_data:
 				}
 				is_prev_msdu_last = false;
 			}
+		} else if (qdf_unlikely(!dp_rx_war_peek_msdu_done(soc,
+								  rx_desc))) {
+			struct dp_rx_desc *old_rx_desc =
+				dp_rx_war_store_msdu_done_fail_desc(
+								soc, rx_desc,
+								reo_ring_num);
+			if (qdf_likely(old_rx_desc)) {
+				rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
+				dp_rx_add_to_free_desc_list
+					(&head[rx_desc->chip_id][rx_desc->pool_id],
+					 &tail[rx_desc->chip_id][rx_desc->pool_id],
+					 old_rx_desc);
+				quota -= 1;
+				num_pending -= 1;
+				num_rx_bufs_reaped++;
+			}
+			rx_desc->msdu_done_fail = 1;
+			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
+			dp_err("MSDU DONE failure %d",
+			       soc->stats.rx.err.msdu_done_fail);
+			dp_rx_msdu_done_fail_event_record(soc, rx_desc,
+							  rx_desc->nbuf);
+			continue;
 		}
 
 		if (!is_prev_msdu_last &&
@@ -641,7 +749,7 @@ done:
 			       soc->stats.rx.err.msdu_done_fail);
 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
 					     QDF_TRACE_LEVEL_INFO);
-			dp_rx_msdu_done_fail_event_record(soc, nbuf);
+			dp_rx_msdu_done_fail_event_record(soc, NULL, nbuf);
 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
 			dp_rx_nbuf_free(nbuf);
 			qdf_assert(0);
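
Note: the WAR path added above relies on soc->msdu_done_fail_desc_list, dp_get_next_index() and DP_MSDU_DONE_FAIL_DESCS_MAX, none of which are defined in these hunks. A minimal sketch of the assumed shape of that bookkeeping follows; the depth and the index helper are illustrative assumptions, not part of this change.

/*
 * Sketch only: the real definitions live outside these hunks (e.g. in the
 * dp_soc / internal headers). The depth below and the wrap-around index
 * helper are assumptions made for illustration.
 */
#define DP_MSDU_DONE_FAIL_DESCS_MAX 16	/* assumed depth of the stash */

struct dp_rx_msdu_done_fail_desc_list {
	qdf_atomic_t index;	/* last slot written; wraps around */
	struct dp_rx_desc *msdu_done_fail_descs[DP_MSDU_DONE_FAIL_DESCS_MAX];
};

/* Assumed behaviour of dp_get_next_index(): atomically advance and wrap. */
static inline uint32_t dp_get_next_index(qdf_atomic_t *index, uint32_t max)
{
	return (uint32_t)qdf_atomic_inc_return(index) % max;
}

With that shape, dp_rx_war_store_msdu_done_fail_desc() behaves as a small ring: the descriptor whose MSDU_DONE bit never asserted is parked in the next slot instead of being processed, while the slot's previous occupant, whose DMA has since had time to complete, is unmapped, its nbuf freed, and its rx_desc handed back to the caller for the free-descriptor list and eventual replenish.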