
qcacmn: add RX buffer pool support

Add support for an RX buffer pool: a pre-allocated pool of
buffers that is utilized during low-memory conditions.

RX buffer replenish fails when the system is low on memory;
this is acceptable if the condition is momentary, but if the
system stays in that state for a longer duration, the HW runs
out of free buffers and this results in a crash.

The RX buffer pool helps in such scenarios: buffers from the
pool are given to the HW when the system is running low on
memory.
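
For context, a replenish path would consume this API roughly as
follows (a minimal sketch; the loop structure and the variable names
num_req_buffers/num_entries_avail are illustrative, not part of this
change):

	while (num_req_buffers--) {
		/* Try the heap first; dp_rx_buffer_pool_nbuf_alloc()
		 * falls back to the emergency pool when qdf_nbuf_alloc()
		 * fails or the refill ring is running low.
		 */
		nbuf = dp_rx_buffer_pool_nbuf_alloc(soc, mac_id,
						    rx_desc_pool,
						    num_entries_avail);
		if (!nbuf)
			break;	/* heap and pool are both exhausted */
		/* ... map the nbuf and post it to the RXDMA refill ring */
	}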

Change-Id: I7ca1159d858227a0a2021235d24d763e56ac0b27
CRs-Fixed: 2731517
Manikanta Pubbisetty 5 years ago
parent
commit
ea9aadb12d
2 changed files with 369 additions and 0 deletions
  1. dp/wifi3.0/dp_rx_buffer_pool.c   +206 -0
  2. dp/wifi3.0/dp_rx_buffer_pool.h   +163 -0

+ 206 - 0
dp/wifi3.0/dp_rx_buffer_pool.c

@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_rx_buffer_pool.h"
+
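+/* Number of pre-allocated buffers kept in each MAC's emergency pool */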
+#ifndef DP_RX_BUFFER_POOL_SIZE
+#define DP_RX_BUFFER_POOL_SIZE 128
+#endif
+
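+/* Consecutive nbuf allocation failures tolerated before the replenish
+ * path starts drawing buffers from the emergency pool.
+ */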
+#ifndef DP_RX_BUFFER_POOL_ALLOC_THRES
+#define DP_RX_BUFFER_POOL_ALLOC_THRES 1
+#endif
+
+#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
+bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
+{
+	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
+	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
+	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
+	bool consumed = false;
+
+	if (!bufpool->is_initialized)
+		return consumed;
+
+	/* process only buffers belonging to the monitor RXDMA ring */
+	if (qdf_unlikely(rx_desc_pool !=
+			 dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id)))
+		return consumed;
+
+	first_nbuf = nbuf;
+
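+	/* Walk the (possibly chained) MSDU, topping up the pool with
+	 * freshly allocated nbufs until it is full.
+	 */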
+	while (nbuf) {
+		next_nbuf = qdf_nbuf_next(nbuf);
+
+		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
+		    DP_RX_BUFFER_POOL_SIZE))
+			break;
+
+		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
+					     RX_BUFFER_RESERVATION,
+					     rx_desc_pool->buf_alignment,
+					     FALSE);
+
+		/* Failed to allocate a replacement nbuf; consume the
+		 * received buffer into the pool instead (it is reset
+		 * and enqueued via dp_rx_buffer_pool_nbuf_free() below).
+		 */
+		if (!refill_nbuf) {
+			DP_STATS_INC(pdev,
+				     rx_buffer_pool.num_bufs_consumed, 1);
+			consumed = true;
+			break;
+		}
+
+		/* Allocation succeeded; queue the fresh nbuf into the pool */
+		DP_STATS_INC(pdev,
+			     rx_buffer_pool.num_bufs_alloc_success, 1);
+		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
+						 refill_nbuf);
+		nbuf = next_nbuf;
+	}
+
+	nbuf = first_nbuf;
+	if (consumed) {
+		/* Free the MSDU/scattered MSDU */
+		while (nbuf) {
+			next_nbuf = qdf_nbuf_next(nbuf);
+			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
+			nbuf = next_nbuf;
+		}
+	}
+
+	return consumed;
+}
+
+void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
+{
+	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	struct rx_desc_pool *rx_desc_pool;
+	struct rx_buff_pool *buff_pool;
+
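+	/* when per-pdev LMAC rings are not used, resolve the LMAC id
+	 * from the pdev
+	 */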
+	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
+		mac_id = dp_pdev->lmac_id;
+
+	rx_desc_pool = &soc->rx_desc_buf[mac_id];
+	buff_pool = &soc->rx_buff_pool[mac_id];
+
+	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
+		       DP_RX_BUFFER_POOL_SIZE)) {
+		qdf_nbuf_free(nbuf);
+		return;
+	}
+
+	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
+		       rx_desc_pool->buf_alignment);
+	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
+}
+
+qdf_nbuf_t
+dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
+			     struct rx_desc_pool *rx_desc_pool,
+			     uint32_t num_available_buffers)
+{
+	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
+	struct rx_buff_pool *buff_pool;
+	struct dp_srng *dp_rxdma_srng;
+	qdf_nbuf_t nbuf;
+
+	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
+		mac_id = dp_pdev->lmac_id;
+
+	buff_pool = &soc->rx_buff_pool[mac_id];
+	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
+
+	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
+			      RX_BUFFER_RESERVATION,
+			      rx_desc_pool->buf_alignment,
+			      FALSE);
+
+	if (!buff_pool->is_initialized)
+		return nbuf;
+
+	if (qdf_likely(nbuf)) {
+		buff_pool->nbuf_fail_cnt = 0;
+		return nbuf;
+	}
+
+	buff_pool->nbuf_fail_cnt++;
+
+	/* Fall back to the emergency pool once allocation has failed
+	 * DP_RX_BUFFER_POOL_ALLOC_THRES times in a row, or when fewer
+	 * than 10% of the refill ring entries are available.
+	 */
+	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFFER_POOL_ALLOC_THRES ||
+	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
+		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
+		if (nbuf)
+			DP_STATS_INC(dp_pdev,
+				     rx_buffer_pool.num_pool_bufs_replenish, 1);
+	}
+
+	return nbuf;
+}
+
+void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
+{
+	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
+	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
+	qdf_nbuf_t nbuf;
+	int i;
+
+	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
+		dp_err("RX buffer pool support is disabled");
+		buff_pool->is_initialized = false;
+		return;
+	}
+
+	if (buff_pool->is_initialized)
+		return;
+
+	qdf_nbuf_queue_head_init(&buff_pool->emerg_nbuf_q);
+
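+	/* best-effort pre-allocation; the pool may start partially
+	 * filled if the system is already low on memory
+	 */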
+	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
+		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
+				      RX_BUFFER_RESERVATION,
+				      rx_desc_pool->buf_alignment, FALSE);
+		if (!nbuf)
+			continue;
+		qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q,
+						 nbuf);
+	}
+
+	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
+		DP_RX_BUFFER_POOL_SIZE,
+		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));
+
+	buff_pool->is_initialized = true;
+}
+
+void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
+{
+	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
+	qdf_nbuf_t nbuf;
+
+	if (!buff_pool->is_initialized)
+		return;
+
+	dp_info("buffers in the RX buffer pool during deinit: %u",
+		qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q));
+
+	while ((nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q)))
+		qdf_nbuf_free(nbuf);
+
+	buff_pool->is_initialized = false;
+}
+#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

+ 163 - 0
dp/wifi3.0/dp_rx_buffer_pool.h

@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_RX_BUFFER_POOL_H_
+#define _DP_RX_BUFFER_POOL_H_
+
+#include "dp_types.h"
+#include "qdf_nbuf.h"
+#include "qdf_module.h"
+#include "athdefs.h"
+#include "wlan_cfg.h"
+#include "dp_internal.h"
+#include "dp_rx.h"
+#include "dp_rx_mon.h"
+
+#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
+/**
+ * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
+ * @soc: SoC handle
+ * @mac_id: MAC ID
+ *
+ * Return: None
+ */
+void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id);
+
+/**
+ * dp_rx_buffer_pool_deinit() - De-initialize emergency buffer pool
+ * @soc: SoC handle
+ * @mac_id: MAC ID
+ *
+ * Return: None
+ */
+void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id);
+
+/**
+ * dp_rx_buffer_pool_refill() - Process the rx nbuf list and
+ * refill the emergency buffer pool
+ * @soc: SoC handle
+ * @nbuf: RX buffer
+ * @mac_id: MAC ID
+ *
+ * Return: true if the rx nbuf was consumed into the pool, false otherwise.
+ */
+bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id);
+
+/**
+ * dp_rx_buffer_pool_nbuf_free() - Free the nbuf or queue it
+ * back into the pool
+ * @soc: SoC handle
+ * @nbuf: RX buffer
+ * @mac_id: MAC ID
+ *
+ * Return: None
+ */
+void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
+				 u8 mac_id);
+
+/**
+ * dp_rx_buffer_pool_nbuf_alloc() - Allocate nbuf for buffer replenish;
+ * fall back to the pool if allocation fails
+ * @soc: SoC handle
+ * @mac_id: MAC ID
+ * @rx_desc_pool: RX descriptor pool
+ * @num_available_buffers: number of available buffers in the ring.
+ *
+ * Return: allocated nbuf, or NULL if both the heap and the pool are exhausted
+ */
+qdf_nbuf_t dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
+					struct rx_desc_pool *rx_desc_pool,
+					uint32_t num_available_buffers);
+#else
+/**
+ * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
+ * @soc: SoC handle
+ * @mac_id: MAC ID
+ *
+ * Return: None
+ */
+static inline
+void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
+{
+	soc->rx_buff_pool[mac_id].is_initialized = false;
+}
+
+/**
+ * dp_rx_buffer_pool_deinit() - De-initialize emergency buffer pool
+ * @soc: SoC handle
+ * @mac_id: MAC ID
+ *
+ * Return: None
+ */
+static inline
+void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
+{
+}
+
+/**
+ * dp_rx_buffer_pool_refill() - Process the rx nbuf list and
+ * refill the emergency buffer pool
+ * @soc: SoC handle
+ * @nbuf: RX buffer
+ * @mac_id: MAC ID
+ *
+ * Return: true if the rx nbuf was consumed into the pool, false otherwise.
+ */
+static inline
+bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
+{
+	return false;
+}
+
+/**
+ * dp_rx_buffer_pool_nbuf_free() - Free the nbuf or queue it
+ * back into the pool
+ * @soc: SoC handle
+ * @nbuf: RX buffer
+ * @mac_id: MAC ID
+ *
+ * Return: None
+ */
+static inline
+void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
+				 u8 mac_id)
+{
+	qdf_nbuf_free(nbuf);
+}
+
+/**
+ * dp_rx_buffer_pool_nbuf_alloc() - Allocate nbuf for buffer replenish;
+ * fall back to the pool if allocation fails
+ * @soc: SoC handle
+ * @mac_id: MAC ID
+ * @rx_desc_pool: RX descriptor pool
+ * @num_available_buffers: number of available buffers in the ring.
+ *
+ * Return: allocated nbuf, or NULL on allocation failure
+ */
+static inline qdf_nbuf_t
+dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
+			     struct rx_desc_pool *rx_desc_pool,
+			     uint32_t num_available_buffers)
+{
+	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
+			      RX_BUFFER_RESERVATION,
+			      rx_desc_pool->buf_alignment, FALSE);
+}
+#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
+#endif /* _DP_RX_BUFFER_POOL_H_ */