qcacmn: Add RX prealloc pool for replenishing RX buffers
Add and initialize a preallocated pool of buffers which can be used to
replenish RX buffers. During replenish, the buffers from the pool are
used instead of allocating/mapping a new buffer in the softirq context.
This preallocated pool will be refilled in thread context.

Change-Id: Idf3bd7d25c5d57ddba105ccd8fab672c26a184f1
CRs-Fixed: 2869345

Committed by: snandini
Parent: 71e3244d46
Commit: 595bc84a39
@@ -665,6 +665,29 @@ cdp_register_rx_mic_error_ind_handler(ol_txrx_soc_handle soc,
 	soc->ol_ops->rx_mic_error = rx_mic_cb;
 }
 
+typedef void (*rx_refill_thread_sched_cb)(ol_txrx_soc_handle soc);
+
+/**
+ * cdp_register_rx_refill_thread_sched_handler() - API to register RX refill
+ * thread schedule handler
+ *
+ * @soc: soc handle
+ * @rx_sched_cb: RX refill thread scheduler callback
+ *
+ * Return: void
+ */
+static inline void
+cdp_register_rx_refill_thread_sched_handler(ol_txrx_soc_handle soc,
+					    rx_refill_thread_sched_cb rx_sched_cb)
+{
+	if (!soc || !soc->ol_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return;
+	}
+
+	soc->ol_ops->dp_rx_sched_refill_thread = rx_sched_cb;
+}
+
 /**
  * cdp_pdev_reset_driver_del_ack() - reset driver TCP delayed ack flag
  * @soc: data path soc handle
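For context, a host driver registers its scheduler callback through this new
API at attach time. A minimal sketch follows, assuming hypothetical host-side
names (host_rx_refill_thread_sched, rx_refill_event); only
cdp_register_rx_refill_thread_sched_handler() itself comes from this change:

    /* Hypothetical host-side scheduler callback. It runs in softirq
     * context, so it must not allocate; it only signals the refill
     * thread, which later calls dp_rx_refill_buff_pool_enqueue(). */
    static qdf_event_t rx_refill_event;

    static void host_rx_refill_thread_sched(ol_txrx_soc_handle soc)
    {
            qdf_event_set(&rx_refill_event);
    }

    /* At soc attach time: */
    cdp_register_rx_refill_thread_sched_handler(soc,
                                                host_rx_refill_thread_sched);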
@@ -1147,6 +1147,7 @@ struct ol_if_ops {
 	QDF_STATUS(*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
 				   uint8_t vdev_id);
 	int (*dp_rx_get_pending)(ol_txrx_soc_handle soc);
+	void (*dp_rx_sched_refill_thread)(ol_txrx_soc_handle soc);
 	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
 #ifdef QCA_SUPPORT_WDS_EXTENDED
 	void (*rx_wds_ext_peer_learn)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
@@ -1968,6 +1968,11 @@ struct cdp_pdev_stats {
 		uint64_t num_pool_bufs_replenish;
 		uint64_t num_bufs_alloc_success;
 	} rx_buffer_pool;
+
+	struct {
+		uint64_t num_bufs_refilled;
+		uint64_t num_bufs_allocated;
+	} rx_refill_buff_pool;
 };
 
 /* struct cdp_peer_hmwds_ast_add_status - hmwds peer ast add status
@@ -230,11 +230,8 @@ dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
 		return QDF_STATUS_E_NOMEM;
 	}
 
-	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
-					 (nbuf_frag_info_t->virt_addr).nbuf,
-					 QDF_DMA_FROM_DEVICE,
-					 rx_desc_pool->buf_size);
-
+	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
+					 nbuf_frag_info_t);
 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
 		dp_rx_buffer_pool_nbuf_free(dp_soc,
 			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
@@ -246,11 +243,6 @@ dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
 	nbuf_frag_info_t->paddr =
 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
 
-	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
-			(qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
-			rx_desc_pool->buf_size,
-			true);
-
 	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
 			     &nbuf_frag_info_t->paddr,
 			     rx_desc_pool);
@@ -415,6 +407,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 
 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
 
+	dp_rx_schedule_refill_thread(dp_soc);
+
 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
 			 count, num_desc_to_free);
 
@@ -17,13 +17,22 @@
  */
 
 #include "dp_rx_buffer_pool.h"
+#include "dp_ipa.h"
 
 #ifndef DP_RX_BUFFER_POOL_SIZE
 #define DP_RX_BUFFER_POOL_SIZE 128
 #endif
 
-#ifndef DP_RX_BUFFER_POOL_ALLOC_THRES
-#define DP_RX_BUFFER_POOL_ALLOC_THRES 1
+#ifndef DP_RX_REFILL_BUFF_POOL_SIZE
+#define DP_RX_REFILL_BUFF_POOL_SIZE 2048
+#endif
+
+#ifndef DP_RX_REFILL_BUFF_POOL_BURST
+#define DP_RX_REFILL_BUFF_POOL_BURST 64
+#endif
+
+#ifndef DP_RX_BUFF_POOL_ALLOC_THRES
+#define DP_RX_BUFF_POOL_ALLOC_THRES 1
 #endif
 
 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
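Each tunable above is wrapped in #ifndef, so a platform build can override the
defaults without editing this file, either from a platform config header or
via -D flags in the build. The values below are purely illustrative:

    /* Illustrative build-time overrides, e.g. in a platform header or
     * -DDP_RX_REFILL_BUFF_POOL_SIZE=4096 in CFLAGS. */
    #define DP_RX_REFILL_BUFF_POOL_SIZE  4096   /* deeper refill pool */
    #define DP_RX_REFILL_BUFF_POOL_BURST 128    /* bigger refill batches */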
@@ -109,6 +118,96 @@ void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
 	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
 }
 
+void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
+{
+	struct rx_desc_pool *rx_desc_pool;
+	struct rx_refill_buff_pool *buff_pool;
+	struct dp_pdev *dp_pdev;
+	qdf_nbuf_t nbuf;
+	QDF_STATUS ret;
+	int count, i;
+	qdf_nbuf_t nbuf_head;
+	qdf_nbuf_t nbuf_tail;
+	uint32_t num_req_refill;
+
+	if (!soc)
+		return;
+
+	buff_pool = &soc->rx_refill_buff_pool;
+	if (!buff_pool->is_initialized)
+		return;
+
+	rx_desc_pool = &soc->rx_desc_buf[0];
+	dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
+
+	num_req_refill = buff_pool->max_bufq_len - buff_pool->bufq_len;
+
+	while (num_req_refill) {
+		if (num_req_refill > DP_RX_REFILL_BUFF_POOL_BURST)
+			num_req_refill = DP_RX_REFILL_BUFF_POOL_BURST;
+
+		count = 0;
+		nbuf_head = NULL;
+		nbuf_tail = NULL;
+		for (i = 0; i < num_req_refill; i++) {
+			nbuf = qdf_nbuf_alloc(soc->osdev,
+					      rx_desc_pool->buf_size,
+					      RX_BUFFER_RESERVATION,
+					      rx_desc_pool->buf_alignment,
+					      FALSE);
+			if (!nbuf)
+				continue;
+
+			ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
+							 QDF_DMA_FROM_DEVICE,
+							 rx_desc_pool->buf_size);
+			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
+				qdf_nbuf_free(nbuf);
+				continue;
+			}
+
+			dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
+							  rx_desc_pool->buf_size,
+							  true);
+			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
+			count++;
+		}
+		if (count) {
+			qdf_spin_lock_bh(&buff_pool->bufq_lock);
+			DP_RX_MERGE_TWO_LIST(buff_pool->buf_head,
+					     buff_pool->buf_tail,
+					     nbuf_head, nbuf_tail);
+			buff_pool->bufq_len += count;
+
+			num_req_refill = buff_pool->max_bufq_len -
+				buff_pool->bufq_len;
+			qdf_spin_unlock_bh(&buff_pool->bufq_lock);
+
+			DP_STATS_INC(dp_pdev,
+				     rx_refill_buff_pool.num_bufs_refilled,
+				     count);
+		}
+	}
+}
+
+static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
+{
+	qdf_nbuf_t nbuf = NULL;
+	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
+
+	if (!buff_pool->is_initialized || !buff_pool->bufq_len)
+		return nbuf;
+
+	qdf_spin_lock_bh(&buff_pool->bufq_lock);
+	nbuf = buff_pool->buf_head;
+	buff_pool->buf_head = qdf_nbuf_next(buff_pool->buf_head);
+	qdf_nbuf_set_next(nbuf, NULL);
+	buff_pool->bufq_len--;
+	qdf_spin_unlock_bh(&buff_pool->bufq_lock);
+
+	return nbuf;
+}
+
 qdf_nbuf_t
 dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
 			     struct rx_desc_pool *rx_desc_pool,
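dp_rx_refill_buff_pool_enqueue() above is the thread-context half of the
design: a host RX refill thread, once signalled through the
dp_rx_sched_refill_thread callback, simply invokes it in its loop. A minimal
sketch, with the event plumbing (rx_refill_event, refill_thread_stop) as
illustrative assumptions; only dp_rx_refill_buff_pool_enqueue() is provided by
this change:

    /* Hypothetical refill-thread body. */
    static int dp_rx_refill_worker(void *arg)
    {
            struct dp_soc *soc = arg;

            while (!qdf_atomic_read(&refill_thread_stop)) {
                    /* Sleep until the scheduler callback signals us. */
                    qdf_wait_single_event(&rx_refill_event, 0);
                    qdf_event_reset(&rx_refill_event);

                    /* Allocate and DMA-map nbufs in thread context and
                     * push them into soc->rx_refill_buff_pool. */
                    dp_rx_refill_buff_pool_enqueue(soc);
            }

            return 0;
    }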
@@ -119,6 +218,13 @@ dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
 	struct dp_srng *dp_rxdma_srng;
 	qdf_nbuf_t nbuf;
 
+	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
+	if (nbuf) {
+		DP_STATS_INC(dp_pdev,
+			     rx_refill_buff_pool.num_bufs_allocated, 1);
+		return nbuf;
+	}
+
 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
 		mac_id = dp_pdev->lmac_id;
 
@@ -152,6 +258,81 @@ dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
 	return nbuf;
 }
 
+QDF_STATUS
+dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
+			   struct rx_desc_pool *rx_desc_pool,
+			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
+{
+	QDF_STATUS ret = QDF_STATUS_SUCCESS;
+
+	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
+		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
+						 (nbuf_frag_info_t->virt_addr).nbuf,
+						 QDF_DMA_FROM_DEVICE,
+						 rx_desc_pool->buf_size);
+
+		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
+			return ret;
+
+		dp_ipa_handle_rx_buf_smmu_mapping(soc,
+				(qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
+				rx_desc_pool->buf_size,
+				true);
+	}
+
+	return ret;
+}
+
+static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
+{
+	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
+	qdf_nbuf_t nbuf;
+	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
+	QDF_STATUS ret;
+	int i;
+
+	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
+		dp_err("RX refill buffer pool support is disabled");
+		buff_pool->is_initialized = false;
+		return;
+	}
+
+	buff_pool->bufq_len = 0;
+	buff_pool->buf_head = NULL;
+	buff_pool->buf_tail = NULL;
+	buff_pool->max_bufq_len = DP_RX_REFILL_BUFF_POOL_SIZE;
+	qdf_spinlock_create(&buff_pool->bufq_lock);
+
+	for (i = 0; i < buff_pool->max_bufq_len; i++) {
+		nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
+				      RX_BUFFER_RESERVATION,
+				      rx_desc_pool->buf_alignment, FALSE);
+		if (!nbuf)
+			continue;
+
+		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
+						 QDF_DMA_FROM_DEVICE,
+						 rx_desc_pool->buf_size);
+		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
+			qdf_nbuf_free(nbuf);
+			continue;
+		}
+
+		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
+						  rx_desc_pool->buf_size,
+						  true);
+		DP_RX_LIST_APPEND(buff_pool->buf_head,
+				  buff_pool->buf_tail, nbuf);
+		buff_pool->bufq_len++;
+	}
+
+	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
+		buff_pool->max_bufq_len,
+		buff_pool->bufq_len);
+
+	buff_pool->is_initialized = true;
+}
+
 void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
 {
 	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
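The QDF_NBUF_CB_PADDR check in dp_rx_buffer_pool_nbuf_map() is what makes the
replenish fast path cheap: nbufs dequeued from the refill pool were already
DMA-mapped (and SMMU-mapped for IPA) when the pool was filled in thread
context, so their recorded physical address is non-zero and the mapping is
skipped entirely. Only fall-back buffers freshly allocated in softirq context
still pay the mapping cost.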
@@ -159,6 +340,8 @@ void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
 	qdf_nbuf_t nbuf;
 	int i;
 
+	dp_rx_refill_buff_pool_init(soc, mac_id);
+
 	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
 		dp_err("RX buffer pool support is disabled");
 		buff_pool->is_initialized = false;
@@ -187,11 +370,35 @@ void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
 	buff_pool->is_initialized = true;
 }
 
+static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
+{
+	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
+	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
+	qdf_nbuf_t nbuf;
+
+	if (!buff_pool->is_initialized)
+		return;
+
+	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
+		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
+						  rx_desc_pool->buf_size,
+						  false);
+		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
+					     QDF_DMA_BIDIRECTIONAL,
+					     rx_desc_pool->buf_size);
+		qdf_nbuf_free(nbuf);
+	}
+
+	buff_pool->is_initialized = false;
+}
+
 void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
 {
 	struct rx_buff_pool *buff_pool = &soc->rx_buff_pool[mac_id];
 	qdf_nbuf_t nbuf;
 
+	dp_rx_refill_buff_pool_deinit(soc, mac_id);
+
 	if (!buff_pool->is_initialized)
 		return;
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -83,6 +83,32 @@ void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf,
 qdf_nbuf_t dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
 					struct rx_desc_pool *rx_desc_pool,
 					uint32_t num_available_buffers);
+
+/**
+ * dp_rx_buffer_pool_nbuf_map() - Map nbuf for buffer replenish
+ * @soc: SoC handle
+ * @rx_desc_pool: RX descriptor pool
+ * @nbuf_frag_info_t: nbuf frag info
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS
+dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
+			   struct rx_desc_pool *rx_desc_pool,
+			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t);
+
+/**
+ * dp_rx_schedule_refill_thread() - Schedule RX refill thread to enqueue
+ * buffers in refill pool
+ * @soc: SoC handle
+ *
+ * Return: void
+ */
+static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc)
+{
+	if (soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread)
+		soc->cdp_soc.ol_ops->dp_rx_sched_refill_thread(dp_soc_to_cdp_soc_t(soc));
+}
+
 #else
 /**
  * dp_rx_buffer_pool_init() - Initialize emergency buffer pool
@@ -159,5 +185,26 @@ dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
 			     RX_BUFFER_RESERVATION,
 			     rx_desc_pool->buf_alignment, FALSE);
 }
+
+/**
+ * dp_rx_buffer_pool_nbuf_map() - Map nbuf for buffer replenish
+ * @soc: SoC handle
+ * @rx_desc_pool: RX descriptor pool
+ * @nbuf_frag_info_t: nbuf frag info
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
+			   struct rx_desc_pool *rx_desc_pool,
+			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
+{
+	return qdf_nbuf_map_nbytes_single(soc->osdev,
+					  (nbuf_frag_info_t->virt_addr).nbuf,
+					  QDF_DMA_FROM_DEVICE,
+					  rx_desc_pool->buf_size);
+}
+
+static inline void dp_rx_schedule_refill_thread(struct dp_soc *soc) { }
 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
 #endif /* _DP_RX_BUFFER_POOL_H_ */
@@ -1138,6 +1138,15 @@ struct rx_buff_pool {
 	bool is_initialized;
 };
 
+struct rx_refill_buff_pool {
+	qdf_nbuf_t buf_head;
+	qdf_nbuf_t buf_tail;
+	qdf_spinlock_t bufq_lock;
+	uint32_t bufq_len;
+	uint32_t max_bufq_len;
+	bool is_initialized;
+};
+
 /*
  * The logic for get current index of these history is dependent on this
  * value being power of 2.
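Sizing note: with the default DP_RX_REFILL_BUFF_POOL_SIZE of 2048 and a
typical RX buffer size of about 2 KB (the actual rx_desc_pool->buf_size is
target-dependent), the pool pins roughly 2048 * 2 KB = 4 MB of preallocated,
pre-mapped memory per soc; that footprint is the trade-off for avoiding
allocation and mapping in softirq context.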
@@ -1727,6 +1736,7 @@ struct dp_soc {
 
 	/* RX buffer params */
 	struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
+	struct rx_refill_buff_pool rx_refill_buff_pool;
 	/* Save recent operation related variable */
 	struct dp_last_op_info last_op_info;
 	TAILQ_HEAD(, dp_peer) inactive_peer_list;
@@ -3120,4 +3130,9 @@ QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
 					    uint32_t mac_id);
 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
 
+#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
+void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
+#else
+static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
+#endif
 #endif /* _DP_TYPES_H_ */