qcacmn: Pre-alloc rx rings history when feature is enabled

Currently the rx ring histories are allocated dynamically
at load time. The memory requirement for saving these
histories is more than a page (order 5 to 6 allocations).
Such a large memory allocation can fail for various
reasons, one of them being memory fragmentation.

Fix this by pre-allocating the rx ring history memory.
Also allocate the rx reinject history memory when the
HW accelerated path is used.

Change-Id: Id957cd5df91a2ca7f182dea691a0557b4e386f55
CRs-Fixed: 2844388
This commit belongs to:
Rakesh Pillai
2020-12-31 13:52:44 +05:30
Reviewed by snandini
Parent a7cc077a26
Commit f1aa992998
2 files changed with 47 additions and 11 deletions

View file

@@ -1493,7 +1493,7 @@ void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
DP_PDEV_TYPE,
ctxt_type,
vaddr);
} else {
dp_warn("dp_prealloc_get_context null!");
@@ -4200,6 +4200,28 @@ static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
}
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
#ifndef RX_DEFRAG_DO_NOT_REINJECT
/**
* dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
* history.
* @soc: DP soc handle
*
* Return: None
*/
static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
soc->rx_reinject_ring_history = dp_context_alloc_mem(
soc, DP_RX_REINJECT_RING_HIST_TYPE, rx_ring_hist_size);
if (soc->rx_reinject_ring_history)
qdf_atomic_init(&soc->rx_reinject_ring_history->index);
}
#else /* RX_DEFRAG_DO_NOT_REINJECT */
/**
 * dp_soc_rx_reinject_ring_history_attach() - stub when defrag reinjection
 * is disabled.
 * @soc: DP soc handle
 *
 * When RX_DEFRAG_DO_NOT_REINJECT is defined the reinject ring is never
 * used, so no history buffer needs to be allocated.
 *
 * Return: None
 */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
#endif /* RX_DEFRAG_DO_NOT_REINJECT */
/**
* dp_soc_rx_history_attach() - Attach the ring history record buffers
* @soc: DP soc structure
@@ -4219,23 +4241,23 @@ static void dp_soc_rx_history_attach(struct dp_soc *soc)
uint32_t rx_err_ring_hist_size;
uint32_t rx_reinject_hist_size;
rx_ring_hist_size = sizeof(*soc->rx_ring_history[i]);
rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history);
rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history);
for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
soc->rx_ring_history[i] = qdf_mem_malloc(rx_ring_hist_size);
soc->rx_ring_history[i] = dp_context_alloc_mem(
soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
if (soc->rx_ring_history[i])
qdf_atomic_init(&soc->rx_ring_history[i]->index);
}
soc->rx_err_ring_history = qdf_mem_malloc(rx_err_ring_hist_size);
soc->rx_err_ring_history = dp_context_alloc_mem(
soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
if (soc->rx_err_ring_history)
qdf_atomic_init(&soc->rx_err_ring_history->index);
soc->rx_reinject_ring_history = qdf_mem_malloc(rx_reinject_hist_size);
if (soc->rx_reinject_ring_history)
qdf_atomic_init(&soc->rx_reinject_ring_history->index);
dp_soc_rx_reinject_ring_history_attach(soc);
}
static void dp_soc_rx_history_detach(struct dp_soc *soc)
@@ -4243,10 +4265,18 @@ static void dp_soc_rx_history_detach(struct dp_soc *soc)
int i;
for (i = 0; i < MAX_REO_DEST_RINGS; i++)
qdf_mem_free(soc->rx_ring_history[i]);
dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
soc->rx_ring_history[i]);
qdf_mem_free(soc->rx_err_ring_history);
qdf_mem_free(soc->rx_reinject_ring_history);
dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
soc->rx_err_ring_history);
/*
* No need for a featurized detach since qdf_mem_free takes
* care of NULL pointer.
*/
dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
soc->rx_reinject_ring_history);
}
#else

View file

@@ -370,9 +370,15 @@ struct dp_rx_nbuf_frag_info {
/**
 * enum dp_ctxt_type - DP context type for pre-allocated memory lookup
 * @DP_PDEV_TYPE: PDEV context
 * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
 * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
 * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
 */
enum dp_ctxt_type {
	DP_PDEV_TYPE,
	DP_RX_RING_HIST_TYPE,
	DP_RX_ERR_RING_HIST_TYPE,
	DP_RX_REINJECT_RING_HIST_TYPE,
};
/**