qcacmn: Add HIF CE RX support to WBUFF

Currently, WBUFF is being used only for WMI TX buffers.
Add HIF CE RX buffer support to WBUFF in an effort to reuse
the copy engine RX buffers instead of freeing/allocating buffers
for every CE RX transaction. This fixes the problem of CE RX
memory fragmentation.

Change-Id: Id9c043a5c5d0882a7994fa03cd8c335555d46b8d
CRs-Fixed: 3534539
This commit is contained in:
Manikanta Pubbisetty
2023-06-15 16:02:25 +05:30
committed by Rahul Choudhary
parent 9b27c6e104
commit cbe8170798
8 changed files with 240 additions and 37 deletions

View File

@@ -43,6 +43,7 @@
#ifdef CONFIG_SAWF_DEF_QUEUES
#include <dp_sawf_htt.h>
#endif
#include <wbuff.h>
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
@@ -3685,6 +3686,20 @@ dp_htt_peer_ext_evt(struct htt_soc *soc, uint32_t *msg_word)
}
#endif
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
static void dp_htt_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	/* Hand the buffer back to its WBUFF pool first; wbuff_buff_put()
	 * returns the nbuf only when it does not belong to any pool, in
	 * which case it must be released to the OS.
	 */
	qdf_nbuf_t unpooled = wbuff_buff_put(nbuf);

	if (unpooled)
		qdf_nbuf_free(unpooled);
}
#else
/* CE RX buffer reuse disabled: release the buffer straight to the OS.
 *
 * Note: the original `return qdf_nbuf_free(nbuf);` returned a void
 * expression from a void function, which is a constraint violation in
 * standard C (C11 6.8.6.4p1) and only compiles as an extension.
 */
static inline void dp_htt_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif
void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
struct htt_soc *soc = (struct htt_soc *) context;
@@ -3698,7 +3713,7 @@ void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
if (pkt->Status != QDF_STATUS_E_CANCELED)
soc->stats.htc_err_cnt++;
qdf_nbuf_free(htt_t2h_msg);
dp_htt_rx_nbuf_free(htt_t2h_msg);
return;
}
@@ -4150,7 +4165,7 @@ void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
/* Free the indication buffer */
if (free_buf)
qdf_nbuf_free(htt_t2h_msg);
dp_htt_rx_nbuf_free(htt_t2h_msg);
}
enum htc_send_full_action

View File

@@ -40,6 +40,7 @@
#include "ce_tasklet.h"
#include "qdf_module.h"
#include "qdf_ssr_driver_dump.h"
#include <wbuff.h>
#define CE_POLL_TIMEOUT 10 /* ms */
@@ -3300,6 +3301,141 @@ hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
}
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
#define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32
static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
	qdf_nbuf_t nbuf;

	/* Prefer a recycled buffer from the per-CE WBUFF pool; fall back
	 * to a fresh OS allocation when the pool is exhausted.
	 */
	nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__,
			      __LINE__);
	if (nbuf)
		return nbuf;

	return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false);
}
static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	/* Return the buffer to its WBUFF pool; a non-NULL result means
	 * the nbuf is not pool-managed and must be freed to the OS.
	 */
	qdf_nbuf_t unpooled = wbuff_buff_put(nbuf);

	if (unpooled)
		qdf_nbuf_free(unpooled);
}
/**
 * hif_calc_wbuff_pool_size() - compute the WBUFF pool size for a copy engine
 * @scn: HIF context
 * @ce_state: copy engine whose RX pool is being sized, may be NULL
 *
 * Base pool size is the CE destination ring depth. WMI control pipes get
 * extra headroom (HIF_CE_RX_NBUF_WMI_POOL_SIZE buffers) on top of that,
 * split evenly between the WMI pipe and the WMI diag pipe when the diag
 * service is mapped.
 *
 * Return: number of buffers to pre-allocate, 0 if @ce_state is NULL.
 */
static int
hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state)
{
	int ul_is_polled, dl_is_polled;
	bool is_wmi_svc, wmi_diag_svc;
	uint8_t ul_pipe, dl_pipe;
	int pool_size;
	int status;
	int ce_id;

	if (!ce_state)
		return 0;

	ce_id = ce_state->id;

	/* This CE carries WMI if the WMI control service maps its DL pipe
	 * onto this CE id.
	 * NOTE(review): this call passes &scn->osc while the call below
	 * uses GET_HIF_OPAQUE_HDL(scn) — presumably equivalent views of
	 * the same opaque handle; confirm and make them consistent.
	 */
	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	is_wmi_svc = !status && (dl_pipe == ce_id);

	/* dl_pipe is overwritten here; the third branch below compares
	 * ce_id against the DIAG service's DL pipe, not WMI's.
	 */
	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
					 WMI_CONTROL_DIAG_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	wmi_diag_svc = !status;

	if (is_wmi_svc && !wmi_diag_svc)
		/* WMI pipe, no diag: it gets the full extra headroom */
		pool_size = ce_state->dest_ring->nentries +
					HIF_CE_RX_NBUF_WMI_POOL_SIZE;
	else if (is_wmi_svc && wmi_diag_svc)
		/* WMI pipe sharing headroom with a diag pipe */
		pool_size = ce_state->dest_ring->nentries +
				HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
	else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe)
		/* Diag pipe itself: the other half of the headroom */
		pool_size = ce_state->dest_ring->nentries +
				HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
	else
		/* Ordinary RX CE: size to the ring depth only */
		pool_size = ce_state->dest_ring->nentries;

	return pool_size;
}
/**
 * hif_ce_rx_wbuff_register() - register RX copy engine pools with WBUFF
 * @scn: HIF context
 *
 * Builds one wbuff_alloc_request per eligible RX copy engine (buffer size
 * taken from the pipe, pool size from hif_calc_wbuff_pool_size()) and
 * registers the table with the WBUFF module. The returned handle is stored
 * in scn->wbuff_handle for use by hif_ce_rx_nbuf_alloc()/free().
 */
static void hif_ce_rx_wbuff_register(struct hif_softc *scn)
{
	struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0};
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;
	struct CE_state *ce_state;
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		pipe_info = &hif_state->pipe_info[ce_id];
		ce_state = scn->ce_id_to_state[ce_id];

		/* Skip uninitialized CEs: the original code dereferenced
		 * ce_state (htt_rx_data) below without a NULL check.
		 */
		if (!ce_state || !pipe_info->buf_sz)
			continue;

		/* Only RX CEs need WBUFF registration. recv_bufs_needed
		 * contains valid count for RX CEs during init time.
		 */
		if (!atomic_read(&pipe_info->recv_bufs_needed))
			continue;

		/* Fastpath HTT data CEs manage their own RX buffers */
		if (ce_is_fastpath_enabled(scn) && ce_state->htt_rx_data)
			continue;

		wbuff_alloc[ce_id].pool_id = ce_id;
		wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz;
		wbuff_alloc[ce_id].pool_size =
				hif_calc_wbuff_pool_size(scn, ce_state);
	}

	scn->wbuff_handle =
		wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4,
				      WBUFF_MODULE_CE_RX);
}
static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
{
	/* Drop the cached handle and release all WBUFF pools for this HIF */
	struct wbuff_mod_handle *hdl = scn->wbuff_handle;

	scn->wbuff_handle = NULL;
	wbuff_module_deregister(hdl);
}
#else
static inline qdf_nbuf_t
hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
{
	/* Reuse disabled: always allocate a fresh buffer sized for this CE */
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return qdf_nbuf_alloc(scn->qdf_dev,
			      hif_state->pipe_info[ce_id].buf_sz,
			      0, 4, false);
}
/* Reuse disabled: release the buffer straight to the OS.
 *
 * Note: the original `return qdf_nbuf_free(nbuf);` returned a void
 * expression from a void function — invalid standard C (C11 6.8.6.4p1).
 */
static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
/* No-op: WBUFF-based CE RX buffer reuse is compiled out */
static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn)
{
}
/* No-op: WBUFF-based CE RX buffer reuse is compiled out */
static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
{
}
#endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */
/**
* hif_ce_do_recv(): send message from copy engine to upper layers
* @msg_callbacks: structure containing callback and callback context
@@ -3322,7 +3458,7 @@ static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
netbuf, pipe_info->pipe_num);
} else {
hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
qdf_nbuf_free(netbuf);
hif_ce_rx_nbuf_free(netbuf);
}
}
@@ -3350,7 +3486,7 @@ hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
atomic_inc(&pipe_info->recv_bufs_needed);
hif_post_recv_buffers_for_pipe(pipe_info);
if (scn->target_status == TARGET_STATUS_RESET)
qdf_nbuf_free(transfer_context);
hif_ce_rx_nbuf_free(transfer_context);
else
hif_ce_do_recv(msg_callbacks, transfer_context,
nbytes, pipe_info);
@@ -3599,7 +3735,7 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
hif_record_ce_desc_event(scn, ce_id,
HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
0, 0);
nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
nbuf = hif_ce_rx_nbuf_alloc(scn, ce_id);
if (!nbuf) {
hif_post_recv_buffers_failure(pipe_info, nbuf,
&pipe_info->nbuf_alloc_err_count,
@@ -3624,7 +3760,7 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
&pipe_info->nbuf_dma_err_count,
HIF_RX_NBUF_MAP_FAILURE,
"HIF_RX_NBUF_MAP_FAILURE");
qdf_nbuf_free(nbuf);
hif_ce_rx_nbuf_free(nbuf);
return status;
}
@@ -3643,7 +3779,7 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
QDF_DMA_FROM_DEVICE);
qdf_nbuf_free(nbuf);
hif_ce_rx_nbuf_free(nbuf);
return status;
}
@@ -3813,6 +3949,8 @@ QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
if (hif_completion_thread_startup(hif_state))
return QDF_STATUS_E_FAILURE;
hif_ce_rx_wbuff_register(scn);
/* enable buffer cleanup */
hif_state->started = true;
@@ -3865,7 +4003,7 @@ static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
if (netbuf) {
qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
QDF_DMA_FROM_DEVICE);
qdf_nbuf_free(netbuf);
hif_ce_rx_nbuf_free(netbuf);
}
}
}
@@ -4002,6 +4140,7 @@ void hif_ce_stop(struct hif_softc *scn)
}
hif_buffer_cleanup(hif_state);
hif_ce_rx_wbuff_deregister(scn);
for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
struct HIF_CE_pipe_info *pipe_info;

View File

@@ -499,6 +499,9 @@ struct hif_softc {
struct qdf_mem_multi_page_t dl_recv_pages;
int dl_recv_pipe_num;
#endif
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
struct wbuff_mod_handle *wbuff_handle;
#endif
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
/* queue(array) to hold register writes */
struct hif_reg_write_q_elem *reg_write_queue;

View File

@@ -21,6 +21,7 @@
#include "htc_internal.h"
#include "htc_credit_history.h"
#include <qdf_nbuf.h> /* qdf_nbuf_t */
#include <wbuff.h>
#if defined(WLAN_DEBUG) || defined(DEBUG)
void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription)
@@ -71,6 +72,20 @@ static A_STATUS htc_process_trailer(HTC_TARGET *target,
uint8_t *pBuffer,
int Length, HTC_ENDPOINT_ID FromEndpoint);
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
static void htc_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	/* Return the buffer to its WBUFF pool; a non-NULL result means
	 * the nbuf is not pool-managed and must be freed to the OS.
	 */
	qdf_nbuf_t unpooled = wbuff_buff_put(nbuf);

	if (unpooled)
		qdf_nbuf_free(unpooled);
}
#else
/* CE RX buffer reuse disabled: release the buffer straight to the OS.
 *
 * Note: the original `return qdf_nbuf_free(nbuf);` returned a void
 * expression from a void function — invalid standard C (C11 6.8.6.4p1).
 */
static inline void htc_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif
static void do_recv_completion_pkt(HTC_ENDPOINT *pEndpoint,
HTC_PACKET *pPacket)
{
@@ -80,7 +95,7 @@ static void do_recv_completion_pkt(HTC_ENDPOINT *pEndpoint,
pEndpoint->Id,
pPacket));
if (pPacket)
qdf_nbuf_free(pPacket->pPktContext);
htc_rx_nbuf_free(pPacket->pPktContext);
} else {
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
("HTC calling ep %d recv callback on packet %pK\n",
@@ -210,7 +225,7 @@ qdf_nbuf_t rx_sg_to_single_netbuf(HTC_TARGET *target)
qdf_mem_copy(anbdata_new, anbdata, qdf_nbuf_len(skb));
qdf_nbuf_put_tail(new_skb, qdf_nbuf_len(skb));
anbdata_new += qdf_nbuf_len(skb);
qdf_nbuf_free(skb);
htc_rx_nbuf_free(skb);
skb = qdf_nbuf_queue_remove(rx_sg_queue);
} while (skb);
@@ -220,7 +235,7 @@ qdf_nbuf_t rx_sg_to_single_netbuf(HTC_TARGET *target)
_failed:
while ((skb = qdf_nbuf_queue_remove(rx_sg_queue)) != NULL)
qdf_nbuf_free(skb);
htc_rx_nbuf_free(skb);
RESET_RX_SG_CONFIG(target);
return NULL;
@@ -478,7 +493,7 @@ QDF_STATUS htc_rx_completion_handler(void *Context, qdf_nbuf_t netbuf,
break;
}
qdf_nbuf_free(netbuf);
htc_rx_nbuf_free(netbuf);
netbuf = NULL;
break;
}
@@ -516,7 +531,7 @@ _out:
#endif
if (netbuf)
qdf_nbuf_free(netbuf);
htc_rx_nbuf_free(netbuf);
return status;

View File

@@ -28,8 +28,13 @@
#include <qdf_status.h>
#include <qdf_nbuf.h>
/* Number of pools supported per module */
#define WBUFF_MAX_POOLS 16
#define WBUFF_MAX_POOL_ID WBUFF_MAX_POOLS
enum wbuff_module_id {
WBUFF_MODULE_WMI_TX,
WBUFF_MODULE_CE_RX,
WBUFF_MAX_MODULES,
};
@@ -93,6 +98,7 @@ QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl);
/**
* wbuff_buff_get() - return buffer to the requester
* @hdl: wbuff_handle corresponding to the module
* @pool_id: pool identifier
* @len: length of buffer requested
* @func_name: function from which buffer is requested
* @line_num: line number in the file
@@ -100,7 +106,8 @@ QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl);
* Return: Network buffer if success
* NULL if failure
*/
qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len,
qdf_nbuf_t
wbuff_buff_get(struct wbuff_mod_handle *hdl, uint8_t pool_id, uint32_t len,
const char *func_name, uint32_t line_num);
/**
@@ -137,8 +144,8 @@ static inline QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
}
static inline qdf_nbuf_t
wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len, const char *func_name,
uint32_t line_num)
wbuff_buff_get(struct wbuff_mod_handle *hdl, uint8_t pool_id, uint32_t len,
const char *func_name, uint32_t line_num)
{
return NULL;
}

View File

@@ -26,9 +26,7 @@
#define _I_WBUFF_H
#include <qdf_nbuf.h>
/* Number of pools supported per module */
#define WBUFF_MAX_POOLS 4
#include <wbuff.h>
#define WBUFF_MODULE_ID_SHIFT 4
#define WBUFF_MODULE_ID_BITMASK 0xF0

View File

@@ -203,6 +203,9 @@ wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num_pools,
len = req[i].buffer_size;
wbuff_pool = &mod->wbuff_pool[pool_id];
if (!pool_size)
continue;
/**
* Allocate pool_size number of buffers for
* the pool given by pool_id
@@ -240,6 +243,7 @@ QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
struct wbuff_module *mod = NULL;
uint8_t module_id = 0, pool_id = 0;
qdf_nbuf_t first = NULL, buf = NULL;
struct wbuff_pool *wbuff_pool;
handle = (struct wbuff_handle *)hdl;
@@ -251,7 +255,12 @@ QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
qdf_spin_lock_bh(&mod->lock);
for (pool_id = 0; pool_id < WBUFF_MAX_POOLS; pool_id++) {
first = mod->wbuff_pool[pool_id].pool;
wbuff_pool = &mod->wbuff_pool[pool_id];
if (!wbuff_pool->initialized)
continue;
first = wbuff_pool->pool;
while (first) {
buf = first;
first = qdf_nbuf_next(buf);
@@ -264,26 +273,29 @@ QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
return QDF_STATUS_SUCCESS;
}
qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len,
qdf_nbuf_t
wbuff_buff_get(struct wbuff_mod_handle *hdl, uint8_t pool_id, uint32_t len,
const char *func_name, uint32_t line_num)
{
struct wbuff_handle *handle;
struct wbuff_module *mod = NULL;
struct wbuff_pool *wbuff_pool;
uint8_t module_id = 0;
uint8_t pool_id = 0;
qdf_nbuf_t buf = NULL;
handle = (struct wbuff_handle *)hdl;
if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len)
if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) ||
((pool_id >= WBUFF_MAX_POOL_ID && !len)))
return NULL;
module_id = handle->id;
mod = &wbuff.mod[module_id];
if (pool_id == WBUFF_MAX_POOL_ID && len)
pool_id = wbuff_get_pool_slot_from_len(mod, len);
if (pool_id == WBUFF_MAX_POOLS)
if (pool_id >= WBUFF_MAX_POOLS)
return NULL;
wbuff_pool = &mod->wbuff_pool[pool_id];

View File

@@ -1768,8 +1768,8 @@ wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
return NULL;
}
wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
line_num);
wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, WBUFF_MAX_POOL_ID,
len, func_name, line_num);
if (!wmi_buf)
wmi_buf = qdf_nbuf_alloc_debug(NULL,
roundup(len + WMI_MIN_HEAD_ROOM,
@@ -1810,8 +1810,8 @@ wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
return NULL;
}
wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
__LINE__);
wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, WBUFF_MAX_POOL_ID,
len, __func__, __LINE__);
if (!wmi_buf)
wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
@@ -2653,6 +2653,20 @@ static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
mtrace_message_id, vdev_id, data);
}
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
static void wmi_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	/* Return the buffer to its WBUFF pool; a non-NULL result means
	 * the nbuf is not pool-managed and must be freed to the OS.
	 */
	qdf_nbuf_t unpooled = wbuff_buff_put(nbuf);

	if (unpooled)
		qdf_nbuf_free(unpooled);
}
#else
/* CE RX buffer reuse disabled: release the buffer straight to the OS.
 *
 * Note: the original `return qdf_nbuf_free(nbuf);` returned a void
 * expression from a void function — invalid standard C (C11 6.8.6.4p1).
 */
static inline void wmi_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif
/**
* wmi_process_control_rx() - process fw events callbacks
* @wmi_handle: handle to wmi_unified
@@ -2672,7 +2686,7 @@ static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
if (qdf_unlikely(idx == A_ERROR)) {
wmi_debug("no handler registered for event id 0x%x", id);
qdf_nbuf_free(evt_buf);
wmi_rx_nbuf_free(evt_buf);
return;
}
wmi_mtrace_rx(id, 0xFF, idx);
@@ -2714,7 +2728,7 @@ static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
evt_buf);
} else {
wmi_err("Invalid event context %d", exec_ctx);
qdf_nbuf_free(evt_buf);
wmi_rx_nbuf_free(evt_buf);
}
}
@@ -2738,7 +2752,7 @@ static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
if (!wmi_handle) {
wmi_err("unable to get wmi_handle to Endpoint %d",
htc_packet->Endpoint);
qdf_nbuf_free(evt_buf);
wmi_rx_nbuf_free(evt_buf);
return;
}
@@ -2766,7 +2780,7 @@ static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
if (!wmi_handle) {
wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
qdf_nbuf_free(evt_buf);
wmi_rx_nbuf_free(evt_buf);
return;
}
@@ -2794,7 +2808,7 @@ static void wmi_control_dbr_rx(void *ctx, HTC_PACKET *htc_packet)
if (!wmi_handle) {
wmi_err("unable to get wmi_handle for dbr event endpoint id:%d",
htc_packet->Endpoint);
qdf_nbuf_free(evt_buf);
wmi_rx_nbuf_free(evt_buf);
return;
}
@@ -2972,7 +2986,7 @@ end:
wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
#endif
qdf_nbuf_free(evt_buf);
wmi_rx_nbuf_free(evt_buf);
}