@@ -1493,7 +1493,7 @@ void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
 
 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
-				DP_PDEV_TYPE,
+				ctxt_type,
				vaddr);
 	} else {
 		dp_warn("dp_prealloc_get_context null!");
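For context on the one-line change above: the platform's put callback takes a context type, presumably so it can return the buffer to the matching preallocated slot, and handing everything back under the hard-coded DP_PDEV_TYPE instead of the caller's ctxt_type would return buffers to the wrong slot. A minimal stand-alone sketch of that failure mode, using illustrative stand-in names rather than the real QDF/DP definitions:

/* Illustrative stand-ins only; not the real dp_ctxt_type or pool layout. */
enum ctxt_type { CTXT_PDEV, CTXT_RX_RING_HIST, CTXT_MAX };

struct prealloc_pool {
	void *slot[CTXT_MAX];	/* one preallocated buffer per context type */
	int in_use[CTXT_MAX];
};

/*
 * Put path: a buffer can only be returned to the slot it was taken
 * from.  With the type hard-coded to CTXT_PDEV, every caller would
 * try to "return" its buffer to the pdev slot and leak the slot it
 * actually borrowed, which is what the fix above prevents.
 */
static int prealloc_put(struct prealloc_pool *pool, enum ctxt_type type,
			void *vaddr)
{
	if (pool->slot[type] != vaddr)
		return -1;	/* buffer does not belong to this slot */

	pool->in_use[type] = 0;
	return 0;
}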
@@ -4200,6 +4200,29 @@ static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
 }
 
 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
+#ifndef RX_DEFRAG_DO_NOT_REINJECT
+/**
+ * dp_soc_rx_reinject_ring_history_attach() - Attach the REO reinject ring
+ *					       history
+ * @soc: DP soc handle
+ *
+ * Return: None
+ */
+static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
+{
+	soc->rx_reinject_ring_history = dp_context_alloc_mem(
+			soc, DP_RX_REINJECT_RING_HIST_TYPE,
+			sizeof(*soc->rx_reinject_ring_history));
+	if (soc->rx_reinject_ring_history)
+		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
+}
+#else /* RX_DEFRAG_DO_NOT_REINJECT */
+static inline void
+dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
+{
+}
+#endif /* RX_DEFRAG_DO_NOT_REINJECT */
+
 /**
  * dp_soc_rx_history_attach() - Attach the ring history record buffers
  * @soc: DP soc structure
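The hunk below replaces the qdf_mem_malloc() calls with dp_context_alloc_mem(), so the ring-history buffers can come from a platform-preallocated context when one is registered. The helper itself is not shown in this excerpt; the sketch that follows is only an assumed outline of the alloc-side pattern (try a preallocated context for the given type, otherwise fall back to a zeroed heap allocation), with stand-in names throughout:

#include <stdlib.h>

/* Stand-in names; not the real dp/qdf API. */
enum ctxt_type {
	CTXT_RX_RING_HIST,
	CTXT_RX_ERR_RING_HIST,
	CTXT_RX_REINJECT_RING_HIST,
};

typedef void *(*prealloc_get_fn)(enum ctxt_type type, size_t size);

/*
 * Assumed alloc-side pattern: prefer a context preallocated by the
 * platform layer for this type, otherwise fall back to a zeroed heap
 * allocation so callers never need to know which path was taken.
 */
static void *context_alloc(prealloc_get_fn prealloc_get,
			   enum ctxt_type type, size_t size)
{
	void *vaddr = prealloc_get ? prealloc_get(type, size) : NULL;

	if (!vaddr)
		vaddr = calloc(1, size);	/* stand-in for qdf_mem_malloc() */

	return vaddr;
}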
@@ -4219,23 +4242,21 @@ static void dp_soc_rx_history_attach(struct dp_soc *soc)
 	uint32_t rx_err_ring_hist_size;
-	uint32_t rx_reinject_hist_size;
 
-	rx_ring_hist_size = sizeof(*soc->rx_ring_history[i]);
+	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
 	rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history);
-	rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history);
 
 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
-		soc->rx_ring_history[i] = qdf_mem_malloc(rx_ring_hist_size);
+		soc->rx_ring_history[i] = dp_context_alloc_mem(
+				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
 		if (soc->rx_ring_history[i])
 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
 	}
 
-	soc->rx_err_ring_history = qdf_mem_malloc(rx_err_ring_hist_size);
+	soc->rx_err_ring_history = dp_context_alloc_mem(
+			soc, DP_RX_ERR_RING_HIST_TYPE, rx_err_ring_hist_size);
 	if (soc->rx_err_ring_history)
 		qdf_atomic_init(&soc->rx_err_ring_history->index);
 
-	soc->rx_reinject_ring_history = qdf_mem_malloc(rx_reinject_hist_size);
-	if (soc->rx_reinject_ring_history)
-		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
+	dp_soc_rx_reinject_ring_history_attach(soc);
 }
 
 static void dp_soc_rx_history_detach(struct dp_soc *soc)
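Each history buffer attached above carries an index that the patch initialises with qdf_atomic_init(). For orientation, here is a hypothetical sketch of how such a ring history is typically filled; the entry layout, depth, and helper name are assumptions made for illustration and do not come from the driver:

#include <stdatomic.h>
#include <stdint.h>

#define HIST_DEPTH 1024			/* assumed power-of-two depth */

/* Hypothetical entry and ring layout, for illustration only. */
struct hist_entry {
	uint64_t timestamp;
	uint32_t ring_id;
	uint32_t num_buf_reaped;
};

struct ring_history {
	atomic_uint index;		/* plays the role of the qdf_atomic_t index */
	struct hist_entry entry[HIST_DEPTH];
};

/*
 * Writers claim the next slot with an atomic post-increment, so several
 * rx contexts can record into one history without taking a lock; the
 * mask wraps the ever-growing index back onto the ring.
 */
static struct hist_entry *hist_claim_slot(struct ring_history *hist)
{
	unsigned int idx = atomic_fetch_add(&hist->index, 1);

	return &hist->entry[idx & (HIST_DEPTH - 1)];
}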
@@ -4243,10 +4264,18 @@ static void dp_soc_rx_history_detach(struct dp_soc *soc)
 	int i;
 
 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
-		qdf_mem_free(soc->rx_ring_history[i]);
+		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
+				    soc->rx_ring_history[i]);
+
+	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
+			    soc->rx_err_ring_history);
 
-	qdf_mem_free(soc->rx_err_ring_history);
-	qdf_mem_free(soc->rx_reinject_ring_history);
+	/*
+	 * No need for a featurized detach since the underlying
+	 * qdf_mem_free() takes care of a NULL pointer.
+	 */
+	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
+			    soc->rx_reinject_ring_history);
 }
 
 #else
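The comment in the detach hunk leans on the fact that freeing an unattached (NULL) history is harmless, which is what lets the RX_DEFRAG_DO_NOT_REINJECT stub skip a matching featurized detach. A small stand-alone illustration of that free-side pattern, again with stand-in names rather than the real dp_context_free_mem()/qdf_mem_free():

#include <stdlib.h>

/* Stand-in names; not the real dp/qdf API. */
enum ctxt_type { CTXT_RX_REINJECT_RING_HIST };

typedef int (*prealloc_put_fn)(enum ctxt_type type, void *vaddr);

/*
 * Assumed free-side pattern: NULL is tolerated, so an attach that is
 * compiled out (leaving the pointer NULL) can still share one
 * unconditional detach path.  A live buffer goes back to the
 * preallocation layer when possible, otherwise to the heap.
 */
static void context_free(prealloc_put_fn prealloc_put,
			 enum ctxt_type type, void *vaddr)
{
	if (!vaddr)
		return;

	if (prealloc_put && !prealloc_put(type, vaddr))
		return;		/* handed back to the preallocated pool */

	free(vaddr);		/* stand-in for qdf_mem_free() */
}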
|