qcacmn: Add fastpath Rx support

With dedicated copy engines (CEs) for Rx and for Tx-completion HTT messages,
skip processing in the Host Target Communication (HTC) layer and handle these
messages directly in the HIF-CE and HTT layers. This optimization yields a
3-4% reduction in CPU utilization.

Change-Id: I400148a0e24ac62dd09e2a95d5f35d94d83fe2df
CRs-Fixed: 987182
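The sketch below is illustrative only and is not part of this change: it shows how an HTT-layer client might hook into the interfaces added here (hif_enable_fastpath, hif_is_fastpath_mode_enabled, hif_ce_fastpath_cb_register and the fastpath_msg_handler typedef). The names htt_t2h_fastpath_handler, htt_attach_fastpath and htt_context are placeholders, and the assumption that these declarations come from hif.h is mine, not the patch's.

/*
 * Illustrative sketch only -- not part of this change.
 */
#include "hif.h"
#include <qdf_nbuf.h>

static int htt_t2h_fastpath_handler(void *htt_context, qdf_nbuf_t *cmpl_msdus,
				    uint32_t num_cmpls)
{
	uint32_t i;
	int num_tx_cmpls = 0;

	(void)htt_context;
	for (i = 0; i < num_cmpls; i++) {
		/*
		 * Parse the HTT T2H message held in cmpl_msdus[i] here.
		 * Count the Tx-completion indications: the return value is
		 * fed to ce_tx_completion() so that many entries can be
		 * reaped off the H2T source ring (see ce_fastpath_rx_handle
		 * in the diff below).
		 */
	}
	return num_tx_cmpls;
}

static void htt_attach_fastpath(struct hif_opaque_softc *hif_ctx,
				void *htt_context)
{
	/*
	 * Enable fastpath early, presumably before the CE pipes are set up,
	 * so the datapath Rx ring gets fully posted (see
	 * hif_update_fastpath_recv_bufs_cnt in the diff below).
	 */
	hif_enable_fastpath(hif_ctx);

	if (hif_is_fastpath_mode_enabled(hif_ctx))
		/* Register the Rx message handler for the htt_rx_data CEs */
		hif_ce_fastpath_cb_register(htt_t2h_fastpath_handler,
					    htt_context);
}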
@@ -316,6 +316,7 @@ void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int);
#endif
void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *scn);

/*
 * Enable/disable CDC max performance workaround
@@ -385,6 +386,10 @@ struct hif_msg_callbacks {
struct hif_bus_id;
typedef struct hif_bus_id hif_bus_id;

typedef int (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
int
hif_ce_fastpath_cb_register(fastpath_msg_handler, void *context);

void hif_post_init(struct hif_opaque_softc *scn, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *scn);
@@ -509,6 +514,9 @@ ol_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, ol_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);

bool ce_is_fastpath_enabled(struct hif_opaque_softc *scn);
bool ce_is_fastpath_handler_registered(struct CE_state *ce_state);
#ifdef __cplusplus
}
#endif
@@ -108,6 +108,8 @@ struct CE_state {

#ifdef WLAN_FEATURE_FASTPATH
	u_int32_t download_len; /* pkt download length for source ring */
	fastpath_msg_handler fastpath_handler;
	void *context;
#endif /* WLAN_FEATURE_FASTPATH */

	ce_send_cb send_cb;
@@ -355,9 +357,8 @@ struct ce_sendlist_s {
	} item[CE_SENDLIST_ITEMS_MAX];
};

#ifdef WLAN_FEATURE_FASTPATH
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
#endif
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);

/* which ring of a CE? */
#define CE_RING_SRC 0
@@ -50,6 +50,7 @@
#include "ce_tasklet.h"
#include "platform_icnss.h"
#include "qwlan_version.h"
#include <cds_api.h>

#define CE_POLL_TIMEOUT 10 /* ms */

@@ -755,10 +756,61 @@ ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
		qdf_assert_always(sw_index == write_index);
	}
}

/**
 * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
 * @ce_hdl: Handle to CE
 *
 * These buffers are never allocated on the fly, but
 * are allocated only once during HIF start and freed
 * only once during HIF stop.
 * NOTE:
 * The assumption here is there is no in-flight DMA in progress
 * currently, so that buffers can be freed up safely.
 *
 * Return: NONE
 */
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
	struct CE_ring_state *dst_ring = ce_state->dest_ring;
	qdf_nbuf_t nbuf;
	int i;

	if (!ce_state->fastpath_handler)
		return;
	/*
	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
	 * this CE is completely full: does not leave one blank space, to
	 * distinguish between empty queue & full queue. So free all the
	 * entries.
	 */
	for (i = 0; i < dst_ring->nentries; i++) {
		nbuf = dst_ring->per_transfer_context[i];

		/*
		 * The reasons for doing this check are:
		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating ring.
		 *    This check accounts for that case, checking
		 *    fastpath_mode_on flag or started flag would not have
		 *    covered that case. This is not in performance path,
		 *    so OK to do this.
		 */
		if (nbuf)
			qdf_nbuf_free(nbuf);
	}
}
#else
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */

void ce_fini(struct CE_handle *copyeng)
@@ -770,7 +822,7 @@ void ce_fini(struct CE_handle *copyeng)
	CE_state->state = CE_UNUSED;
	scn->ce_id_to_state[CE_id] = NULL;
	if (CE_state->src_ring) {
		/* Cleanup the HTT Tx ring */
		/* Cleanup the datapath Tx ring */
		ce_h2t_tx_ce_cleanup(copyeng);

		if (CE_state->src_ring->shadow_base_unaligned)
@@ -788,6 +840,9 @@ void ce_fini(struct CE_handle *copyeng)
		qdf_mem_free(CE_state->src_ring);
	}
	if (CE_state->dest_ring) {
		/* Cleanup the datapath Rx ring */
		ce_t2h_msg_ce_cleanup(copyeng);

		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
			qdf_mem_free_consistent(scn->qdf_dev,
						scn->qdf_dev->dev,
@@ -1359,7 +1414,7 @@ void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	HIF_INFO("Enabling fastpath mode\n");
	scn->fastpath_mode_on = 1;
	scn->fastpath_mode_on = true;
}

/**
@@ -1676,6 +1731,80 @@ void hif_ce_close(struct hif_softc *hif_sc)
{
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
bool ce_is_fastpath_enabled(struct hif_opaque_softc *hif_hdl)
{
	return HIF_GET_SOFTC(hif_hdl)->fastpath_mode_on;
}

/**
 * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
 * fastpath is enabled.
 * @ce_state: handle to copy engine
 *
 * Return: true if fastpath handler is registered for datapath CE.
 */
bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	if (ce_state->fastpath_handler)
		return true;
	else
		return false;
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
 * @scn: HIF handle
 *
 * Datapath Rx CEs are special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe, even, in the beginning
 * unlike for other CE pipes where one less than dest_nentries are filled in
 * the beginning.
 *
 * Return: None
 */
void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *hif_hdl)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
bool ce_is_fastpath_enabled(struct hif_opaque_softc *scn)
{
	return false;
}

void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *scn)
{
}

bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}

#endif /* WLAN_FEATURE_FASTPATH */

/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
@@ -1842,6 +1971,45 @@ err:
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}

#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
{
	struct hif_softc *scn =
		(struct hif_softc *)cds_get_context(QDF_MODULE_ID_HIF);
	struct CE_state *ce_state;
	int i;

	QDF_ASSERT(scn != NULL);

	if (!scn->fastpath_mode_on) {
		HIF_WARN("Fastpath mode disabled\n");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#else
int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef IPA_OFFLOAD
/**
 * hif_ipa_get_ce_resource() - get uc resource on hif
@@ -36,6 +36,9 @@
#include "epping_main.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "ol_txrx_types.h"
#include <cds_api.h>
#include <osdep.h>

#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
@@ -525,7 +528,6 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	qdf_nbuf_t msdu;
	int i;
@@ -533,7 +535,6 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
	uint32_t user_flags = 0;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* 2 msdus per packet */
@@ -673,10 +674,12 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
		return -EIO;
	}

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled((struct hif_opaque_softc *)scn) &&
	     CE_state->htt_rx_data &&
	     (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0))) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			base_addr_owner_space;
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

@@ -697,12 +700,14 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else {
	} else
		status = QDF_STATUS_E_FAILURE;
	}

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
@@ -1267,6 +1272,211 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)

#endif /*ATH_11AC_TXCOMPACT */

#ifdef WLAN_FEATURE_FASTPATH

/**
 * ce_tx_completion() - reap off the CE source ring when CE completion happens
 * @ce_state: Handle to CE
 * @num_tx_cmpls: Number of completions handled
 *
 * API to reap off the CE source ring when CE completion happens:
 * Update number of src_ring entries based on number of completions.
 *
 * Return: None
 */
static void
ce_tx_completion(struct CE_state *ce_state, uint32_t num_tx_cmpls)
{
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;

	ASSERT(num_tx_cmpls);

	qdf_spin_lock(&ce_state->ce_index_lock);

	/*
	 * This locks the index manipulation of this CE with those done
	 * in ce_send_fast().
	 */

	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index = CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
					     num_tx_cmpls);
	qdf_spin_unlock(&ce_state->ce_index_lock);
}

/**
 * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
 * @ce_state: handle to copy engine state
 * @cmpl_msdus: Rx msdus
 * @num_cmpls: number of Rx msdus
 * @ctrl_addr: CE control address
 *
 * Return: None
 */
static void ce_fastpath_rx_handle(struct CE_state *ce_state,
				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
				  uint32_t ctrl_addr)
{
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_state *ce_tx_cmpl_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t tx_cmpls;
	uint32_t write_index;

	tx_cmpls = (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus,
						num_cmpls);

	/* Update Destination Ring Write Index */
	write_index = dest_ring->write_index;
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
	ce_tx_completion(ce_tx_cmpl_state, tx_cmpls);
}

#define MSG_FLUSH_NUM 20
/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
 * @scn: hif_context
 * @ce_id: Copy engine ID
 * Function:
 * 1) Go through the CE ring, and find the completions
 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
 * 3) Unmap buffer & accumulate in an array.
 * 4) Call message handler when array is full or when exiting the handler
 *
 * Return: void
 */

static int
ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t sw_index = dest_ring->sw_index;
	uint32_t nbytes;
	qdf_nbuf_t nbuf;
	uint32_t paddr_lo;
	struct CE_dest_desc *dest_desc;
	uint32_t ce_int_status = (1 << ce_id);
	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	uint32_t nbuf_cmpl_idx = 0;

more_data:
	if (ce_int_status == (1 << ce_id)) {
		for (;;) {

			dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
							 sw_index);

			/*
			 * The following 2 reads are from non-cached memory
			 */
			nbytes = dest_desc->nbytes;

			/* If completion is invalid, break */
			if (qdf_unlikely(nbytes == 0))
				break;

			/*
			 * Build the nbuf list from valid completions
			 */
			nbuf = dest_ring->per_transfer_context[sw_index];

			/*
			 * No lock is needed here, since this is the only thread
			 * that accesses the sw_index
			 */
			sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);

			/*
			 * CAREFUL : Uncached write, but still less expensive,
			 * since most modern caches use "write-combining" to
			 * flush multiple cache-writes all at once.
			 */
			dest_desc->nbytes = 0;

			/*
			 * Per our understanding this is not required on our
			 * since we are doing the same cache invalidation
			 * operation on the same buffer twice in succession,
			 * without any modification to this buffer by CPU in
			 * between.
			 * However, this code with 2 syncs in succession has
			 * been undergoing some testing at a customer site,
			 * and seemed to be showing no problems so far. Would
			 * like to validate from the customer, that this line
			 * is really not required, before we remove this line
			 * completely.
			 */
			paddr_lo = QDF_NBUF_CB_PADDR(nbuf);

			OS_SYNC_SINGLE_FOR_CPU(scn->qdf_dev->dev, paddr_lo,
					       (skb_end_pointer(nbuf) - (nbuf)->data),
					       DMA_FROM_DEVICE);
			qdf_nbuf_put_tail(nbuf, nbytes);

			qdf_assert_always(nbuf->data != NULL);

			cmpl_msdus[nbuf_cmpl_idx++] = nbuf;

			/*
			 * we are not posting the buffers back instead
			 * reusing the buffers
			 */
			if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
				qdf_spin_unlock(&ce_state->ce_index_lock);
				ce_fastpath_rx_handle(ce_state, cmpl_msdus,
						      MSG_FLUSH_NUM, ctrl_addr);
				qdf_spin_lock(&ce_state->ce_index_lock);
				nbuf_cmpl_idx = 0;
			}

		}

		/*
		 * If there are not enough completions to fill the array,
		 * just call the message handler here
		 */
		if (nbuf_cmpl_idx) {
			qdf_spin_unlock(&ce_state->ce_index_lock);
			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
					      nbuf_cmpl_idx, ctrl_addr);
			qdf_spin_lock(&ce_state->ce_index_lock);
			nbuf_cmpl_idx = 0;
		}
		qdf_atomic_set(&ce_state->rx_pending, 0);
		dest_ring->sw_index = sw_index;

		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
					   HOST_IS_COPY_COMPLETE_MASK);
	}
	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
	if (ce_int_status & CE_WATERMARK_MASK)
		goto more_data;

	return QDF_STATUS_SUCCESS;
}

#else
static int
ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	return QDF_STATUS_E_FAILURE;
}
#endif /* WLAN_FEATURE_FASTPATH */

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine, this count should be big enough. Once we hit
@@ -1310,6 +1520,17 @@ int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)

	qdf_spin_lock(&CE_state->ce_index_lock);

	/*
	 * With below check we make sure CE we are handling is datapath CE and
	 * fastpath is enabled.
	 */
	if (ce_is_fastpath_handler_registered(CE_state))
		/* For datapath only Rx CEs */
		if (!ce_per_engine_service_fast(scn, CE_id)) {
			qdf_spin_unlock(&CE_state->ce_index_lock);
			return 0;
		}

	/* Clear force_break flag and re-initialize receive_count to 0 */

	/* NAPI: scn variables- thread/multi-processing safety? */
@@ -138,7 +138,7 @@ struct hif_softc {
	atomic_t link_suspended;
	uint32_t *vaddr_rri_on_ddr;
	int linkstate_vote;
	int fastpath_mode_on;
	bool fastpath_mode_on;
	atomic_t tasklet_from_intr;
	int htc_endpoint;
	qdf_dma_addr_t mem_pa;
@@ -184,4 +184,16 @@ static inline unsigned char *os_malloc(osdev_t nic_dev,
#define SET_NETDEV_DEV(ndev, pdev)
#endif

#define OS_SYNC_SINGLE_FOR_CPU(pdev, paddr_lo, len, oprn) \
{ \
	dma_sync_single_for_cpu(pdev, paddr_lo, len, oprn); \
}

#define OS_SYNC_SINGLE_FOR_DEVICE(pdev, paddr_lo, len, oprn) \
{ \
	dma_sync_single_for_device(pdev, paddr_lo, len, oprn);\
}

#define SLOTS_PER_TX 2

#endif /* end of _OSDEP_H */
@@ -420,6 +420,24 @@ static inline void qdf_nbuf_free(qdf_nbuf_t buf)

#endif

#ifdef WLAN_FEATURE_FASTPATH
/**
 * qdf_nbuf_init_fast() - before putting the buf back into the pool, reset it
 * to its initial state
 *
 * @nbuf: buf instance
 * Return: None
 */

static inline void
qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
{
	atomic_set(&nbuf->users, 1);
	nbuf->data = nbuf->head + NET_SKB_PAD;
	skb_reset_tail_pointer(nbuf);
}
#endif /* WLAN_FEATURE_FASTPATH */

static inline void qdf_nbuf_tx_free(qdf_nbuf_t buf_list, int tx_err)
{
	__qdf_nbuf_tx_free(buf_list, tx_err);