qcacmn: Cleanup fastpath changes
Do the following cleanup on the fastpath code changes:
1) Do not reap off Tx HIF buffers in Rx handling; instead, handle the
   reaping in the Tx fastpath itself.
2) In ce_per_engine_service_fast(), check for more Rx packets after
   packet processing.
3) Make the stub functions static inline for the non-fastpath-enabled
   case.

Change-Id: If07c4344a424ce13b94128bf28931a24255b661a
CRs-Fixed: 987182
committed by: Gerrit - the friendly Code Review server
parent: 7399f148b5
commit: 4a9c3a8fb6
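Reviewer note: item 3) of the commit message is the pattern worth calling out. Non-fastpath builds previously compiled real (empty) functions; the patch moves those stubs into the headers as static inlines. A minimal self-contained sketch of the pattern, mirroring the ce_h2t_tx_ce_cleanup()/ce_t2h_msg_ce_cleanup() hunk below (struct CE_handle is only forward-declared here so the snippet stands alone):

/* Forward declaration stands in for the driver's opaque CE handle. */
struct CE_handle;

#ifdef WLAN_FEATURE_FASTPATH
/* Real implementations exist only in fastpath builds. */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
/* No-op static inlines: callers stay free of #ifdefs and the
 * compiler can elide the calls entirely. */
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif /* WLAN_FEATURE_FASTPATH */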
@@ -308,6 +308,8 @@ QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *scn, uint32_t address,
 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address,
                               uint8_t *data, int nbytes);
 
+typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
+
 /*
  * Set the FASTPATH_mode_on flag in sc, for use by data path
  */
@@ -315,8 +317,14 @@ QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *scn, uint32_t address,
 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int);
+int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context);
+#else
+static inline int hif_ce_fastpath_cb_register(fastpath_msg_handler handler,
+                                              void *context)
+{
+        return QDF_STATUS_E_FAILURE;
+}
 #endif
-void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *scn);
 
 /*
  * Enable/disable CDC max performance workaround
@@ -386,10 +394,6 @@ struct hif_msg_callbacks {
 struct hif_bus_id;
 typedef struct hif_bus_id hif_bus_id;
 
-typedef int (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
-int
-hif_ce_fastpath_cb_register(fastpath_msg_handler, void *context);
-
 void hif_post_init(struct hif_opaque_softc *scn, void *hHTC,
                    struct hif_msg_callbacks *callbacks);
 QDF_STATUS hif_start(struct hif_opaque_softc *scn);
@@ -515,8 +519,6 @@ void hif_set_target_status(struct hif_opaque_softc *hif_ctx, ol_target_status);
 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
                          struct hif_config_info *cfg);
 
-bool ce_is_fastpath_enabled(struct hif_opaque_softc *scn);
-bool ce_is_fastpath_handler_registered(struct CE_state *ce_state);
 #ifdef __cplusplus
 }
 #endif
@@ -357,8 +357,18 @@ struct ce_sendlist_s {
         } item[CE_SENDLIST_ITEMS_MAX];
 };
 
+#ifdef WLAN_FEATURE_FASTPATH
 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
+#else
+static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
+{
+}
+
+static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
+{
+}
+#endif
 
 /* which ring of a CE? */
 #define CE_RING_SRC 0
@@ -725,6 +725,53 @@ error_no_dma_mem:
 }
 
 #ifdef WLAN_FEATURE_FASTPATH
+/**
+ * hif_enable_fastpath() Update that we have enabled fastpath mode
+ * @hif_ctx: HIF context
+ *
+ * For use in data path
+ *
+ * Retrun: void
+ */
+void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
+{
+        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+        HIF_INFO("Enabling fastpath mode\n");
+        scn->fastpath_mode_on = true;
+}
+
+/**
+ * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
+ * @hif_ctx: HIF Context
+ *
+ * For use in data path to skip HTC
+ *
+ * Return: bool
+ */
+bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
+{
+        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+        return scn->fastpath_mode_on;
+}
+
+/**
+ * hif_get_ce_handle - API to get CE handle for FastPath mode
+ * @hif_ctx: HIF Context
+ * @id: CopyEngine Id
+ *
+ * API to return CE handle for fastpath mode
+ *
+ * Return: void
+ */
+void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
+{
+        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+        return scn->ce_id_to_state[id];
+}
+
 /**
  * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
  * No processing is required inside this function.
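Reviewer note: the three functions added above are moved verbatim from later in the file (see the matching deletion in the @@ -1400,55 +1485,6 @@ hunk below). For orientation, a hedged sketch of how a datapath client would drive them; the caller name and the CE id constant are illustrative, not part of this patch:

/* Illustrative caller; "CE_HTT_T2H_MSG_ID" is a stand-in CE id. */
static void *datapath_fastpath_attach(struct hif_opaque_softc *hif_hdl)
{
        hif_enable_fastpath(hif_hdl);   /* flips scn->fastpath_mode_on */

        if (!hif_is_fastpath_mode_enabled(hif_hdl))
                return NULL;

        /* Returns the CE_state pointer for the requested copy engine. */
        return hif_get_ce_handle(hif_hdl, CE_HTT_T2H_MSG_ID);
}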
@@ -803,13 +850,49 @@ void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
                 qdf_nbuf_free(nbuf);
         }
 }
 
+/**
+ * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
+ * @scn: HIF handle
+ *
+ * Datapath Rx CEs are special case, where we reuse all the message buffers.
+ * Hence we have to post all the entries in the pipe, even, in the beginning
+ * unlike for other CE pipes where one less than dest_nentries are filled in
+ * the beginning.
+ *
+ * Return: None
+ */
+static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
+{
+        int pipe_num;
+        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+
+        if (scn->fastpath_mode_on == false)
+                return;
+
+        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
+                struct HIF_CE_pipe_info *pipe_info =
+                        &hif_state->pipe_info[pipe_num];
+                struct CE_state *ce_state =
+                        scn->ce_id_to_state[pipe_info->pipe_num];
+
+                if (ce_state->htt_rx_data)
+                        atomic_inc(&pipe_info->recv_bufs_needed);
+        }
+}
 #else
-void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
+static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
 {
 }
 
-void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
+static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
 {
+        return false;
+}
+
+static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
+{
+        return false;
 }
 #endif /* WLAN_FEATURE_FASTPATH */
@@ -1387,6 +1470,8 @@ QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
         struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
         struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
+        hif_update_fastpath_recv_bufs_cnt(scn);
+
         hif_msg_callbacks_install(scn);
 
         if (hif_completion_thread_startup(hif_state))
@@ -1400,55 +1485,6 @@ QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
         return QDF_STATUS_SUCCESS;
 }
 
-#ifdef WLAN_FEATURE_FASTPATH
-/**
- * hif_enable_fastpath() Update that we have enabled fastpath mode
- * @hif_ctx: HIF context
- *
- * For use in data path
- *
- * Retrun: void
- */
-void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
-{
-        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
-
-        HIF_INFO("Enabling fastpath mode\n");
-        scn->fastpath_mode_on = true;
-}
-
-/**
- * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
- * @hif_ctx: HIF Context
- *
- * For use in data path to skip HTC
- *
- * Return: bool
- */
-bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
-{
-        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
-
-        return scn->fastpath_mode_on;
-}
-
-/**
- * hif_get_ce_handle - API to get CE handle for FastPath mode
- * @hif_ctx: HIF Context
- * @id: CopyEngine Id
- *
- * API to return CE handle for fastpath mode
- *
- * Return: void
- */
-void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
-{
-        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
-
-        return scn->ce_id_to_state[id];
-}
-#endif /* WLAN_FEATURE_FASTPATH */
-
 void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
 {
         struct hif_softc *scn;
@@ -1731,80 +1767,6 @@ void hif_ce_close(struct hif_softc *hif_sc)
 {
 }
 
-#ifdef WLAN_FEATURE_FASTPATH
-/**
- * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
- * @scn: Handle to HIF context
- *
- * Return: true if fastpath is enabled else false.
- */
-bool ce_is_fastpath_enabled(struct hif_opaque_softc *hif_hdl)
-{
-        return HIF_GET_SOFTC(hif_hdl)->fastpath_mode_on;
-}
-
-/**
- * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
- * fastpath is enabled.
- * @ce_state: handle to copy engine
- *
- * Return: true if fastpath handler is registered for datapath CE.
- */
-bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
-{
-        if (ce_state->fastpath_handler)
-                return true;
-        else
-                return false;
-}
-
-/**
- * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
- * @scn: HIF handle
- *
- * Datapath Rx CEs are special case, where we reuse all the message buffers.
- * Hence we have to post all the entries in the pipe, even, in the beginning
- * unlike for other CE pipes where one less than dest_nentries are filled in
- * the beginning.
- *
- * Return: None
- */
-void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *hif_hdl)
-{
-        int pipe_num;
-        struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
-        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-
-        if (scn->fastpath_mode_on == false)
-                return;
-
-        for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
-                struct HIF_CE_pipe_info *pipe_info =
-                        &hif_state->pipe_info[pipe_num];
-                struct CE_state *ce_state =
-                        scn->ce_id_to_state[pipe_info->pipe_num];
-
-                if (ce_state->htt_rx_data)
-                        atomic_inc(&pipe_info->recv_bufs_needed);
-        }
-}
-#else
-bool ce_is_fastpath_enabled(struct hif_opaque_softc *scn)
-{
-        return false;
-}
-
-void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *scn)
-{
-}
-
-bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
-{
-        return false;
-}
-
-#endif /* WLAN_FEATURE_FASTPATH */
-
 /**
  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
  * @hif_sc: hif context
@@ -2003,11 +1965,6 @@ int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
 
         return QDF_STATUS_SUCCESS;
 }
-#else
-int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
-{
-        return QDF_STATUS_SUCCESS;
-}
 #endif
 
 #ifdef IPA_OFFLOAD
@@ -36,9 +36,6 @@
 #include "epping_main.h"
 #include "hif_main.h"
 #include "hif_debug.h"
-#include "ol_txrx_types.h"
-#include <cds_api.h>
-#include <osdep.h>
 
 #ifdef IPA_OFFLOAD
 #ifdef QCA_WIFI_3_0
@@ -528,14 +525,30 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
         u_int32_t ctrl_addr = ce_state->ctrl_addr;
         unsigned int nentries_mask = src_ring->nentries_mask;
         unsigned int write_index;
+        unsigned int sw_index;
         unsigned int frag_len;
         qdf_nbuf_t msdu;
         int i;
         uint64_t dma_addr;
-        uint32_t user_flags = 0;
+        uint32_t user_flags;
 
         qdf_spin_lock_bh(&ce_state->ce_index_lock);
+        Q_TARGET_ACCESS_BEGIN(scn);
+
+        src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
         write_index = src_ring->write_index;
+        sw_index = src_ring->sw_index;
 
+        if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
+            < (SLOTS_PER_DATAPATH_TX * num_msdus))) {
+                HIF_ERROR("Source ring full, required %d, available %d",
+                          (SLOTS_PER_DATAPATH_TX * num_msdus),
+                          CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
+                OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
+                Q_TARGET_ACCESS_END(scn);
+                qdf_spin_unlock_bh(&ce_state->ce_index_lock);
+                return 0;
+        }
+
         /* 2 msdus per packet */
         for (i = 0; i < num_msdus; i++) {
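Reviewer note: the ring-full check added above measures free source-ring slots with CE_RING_DELTA before committing SLOTS_PER_DATAPATH_TX entries per MSDU. A worked example of the arithmetic, assuming the macro's usual masked-difference definition on a power-of-two ring (the definition below is an assumption for illustration, not part of this patch):

#include <stdio.h>

/* Assumed definition: distance from fromidx to toidx on a
 * power-of-two ring, wrapped by the entries mask. */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
        (((int)(toidx) - (int)(fromidx)) & (nentries_mask))

int main(void)
{
        unsigned int nentries_mask = 512 - 1;   /* 512-entry ring */
        unsigned int write_index = 510, sw_index = 2;

        /* Free slots between producer and consumer, keeping one open:
         * (2 - 1 - 510) & 511 = 3, so one 2-slot MSDU still fits, but
         * two MSDUs (4 slots) would trigger the early return above. */
        printf("free slots = %d\n",
               CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
        return 0;
}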
@@ -631,6 +644,7 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
                 }
         }
 
+        Q_TARGET_ACCESS_END(scn);
         qdf_spin_unlock_bh(&ce_state->ce_index_lock);
 
         /*
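Reviewer note: together with the @@ -528 hunk above, this completes the Q_TARGET_ACCESS_BEGIN()/Q_TARGET_ACCESS_END() bracketing of ce_send_fast(): every exit, including the new ring-full early return, ends target access before dropping the lock. A schematic of that pairing, with the driver macros stubbed to no-ops so the sketch stands alone (lock handling omitted):

/* No-op stand-ins for the driver macros, for illustration only. */
#define Q_TARGET_ACCESS_BEGIN(scn) do { (void)(scn); } while (0)
#define Q_TARGET_ACCESS_END(scn)   do { (void)(scn); } while (0)

static int send_fast_shape(void *scn, int ring_full)
{
        Q_TARGET_ACCESS_BEGIN(scn);

        if (ring_full) {
                /* The early exit still unwinds target access. */
                Q_TARGET_ACCESS_END(scn);
                return 0;
        }

        /* ... enqueue descriptors ... */

        Q_TARGET_ACCESS_END(scn);
        return 1;
}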
@@ -641,6 +655,44 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t *msdus,
         ASSERT(i == num_msdus);
         return i;
 }
+
+/**
+ * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
+ * @scn: Handle to HIF context
+ *
+ * Return: true if fastpath is enabled else false.
+ */
+static bool ce_is_fastpath_enabled(struct hif_softc *scn)
+{
+        return scn->fastpath_mode_on;
+}
+
+/**
+ * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
+ * fastpath is enabled.
+ * @ce_state: handle to copy engine
+ *
+ * Return: true if fastpath handler is registered for datapath CE.
+ */
+static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
+{
+        if (ce_state->fastpath_handler)
+                return true;
+        else
+                return false;
+}
+
+
+#else
+static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
+{
+        return false;
+}
+
+static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
+{
+        return false;
+}
 #endif /* WLAN_FEATURE_FASTPATH */
 
 /**
@@ -675,9 +727,7 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
         }
 
         if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
-            (ce_is_fastpath_enabled((struct hif_opaque_softc *)scn) &&
-            CE_state->htt_rx_data &&
-            (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0))) {
+            (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
                 struct CE_dest_desc *dest_ring_base =
                         (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
                 struct CE_dest_desc *dest_desc =
@@ -1272,42 +1322,17 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
 
 #endif /*ATH_11AC_TXCOMPACT */
 
+/*
+ * Number of times to check for any pending tx/rx completion on
+ * a copy engine, this count should be big enough. Once we hit
+ * this threashold we'll not check for any Tx/Rx comlpetion in same
+ * interrupt handling. Note that this threashold is only used for
+ * Rx interrupt processing, this can be used tor Tx as well if we
+ * suspect any infinite loop in checking for pending Tx completion.
+ */
+#define CE_TXRX_COMP_CHECK_THRESHOLD 20
+
 #ifdef WLAN_FEATURE_FASTPATH
-
-/**
- * ce_tx_completion() - reap off the CE source ring when CE completion happens
- * @ce_state: Handle to CE
- * @num_tx_cmpls: Number of completions handled
- *
- * API to reap off the CE source ring when CE completion happens:
- * Update number of src_ring entries based on number of completions.
- *
- * Return: None
- */
-static void
-ce_tx_completion(struct CE_state *ce_state, uint32_t num_tx_cmpls)
-{
-        struct CE_ring_state *src_ring = ce_state->src_ring;
-        uint32_t nentries_mask = src_ring->nentries_mask;
-
-        ASSERT(num_tx_cmpls);
-
-        qdf_spin_lock(&ce_state->ce_index_lock);
-
-        /*
-         * This locks the index manipulation of this CE with those done
-         * in ce_send_fast().
-         */
-
-        /*
-         * Advance the s/w index:
-         * This effectively simulates completing the CE ring descriptors
-         */
-        src_ring->sw_index = CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
-                                             num_tx_cmpls);
-        qdf_spin_unlock(&ce_state->ce_index_lock);
-}
-
 /**
  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
  * @ce_state: handle to copy engine state
@@ -1323,28 +1348,23 @@ static void ce_fastpath_rx_handle(struct CE_state *ce_state,
 {
         struct hif_softc *scn = ce_state->scn;
         struct CE_ring_state *dest_ring = ce_state->dest_ring;
-        struct CE_state *ce_tx_cmpl_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
         uint32_t nentries_mask = dest_ring->nentries_mask;
-        uint32_t tx_cmpls;
         uint32_t write_index;
 
-        tx_cmpls = (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus,
-                                                num_cmpls);
+        (ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
 
         /* Update Destination Ring Write Index */
         write_index = dest_ring->write_index;
         write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
         CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
         dest_ring->write_index = write_index;
-        ce_tx_completion(ce_tx_cmpl_state, tx_cmpls);
 }
 
-#define MSG_FLUSH_NUM 20
+#define MSG_FLUSH_NUM 6
 /**
  * ce_per_engine_service_fast() - CE handler routine to service fastpath messages
  * @scn: hif_context
- * @ce_id: COpy engine ID
- * Function:
+ * @ce_id: Copy engine ID
  * 1) Go through the CE ring, and find the completions
 * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
 * 3) Unmap buffer & accumulate in an array.
@@ -1353,8 +1373,7 @@ static void ce_fastpath_rx_handle(struct CE_state *ce_state,
  * Return: void
  */
 
-static int
-ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
+static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
 {
         struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
         struct CE_ring_state *dest_ring = ce_state->dest_ring;
@@ -1371,6 +1390,7 @@ ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
         qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
         uint32_t ctrl_addr = ce_state->ctrl_addr;
         uint32_t nbuf_cmpl_idx = 0;
+        unsigned int more_comp_cnt = 0;
 
 more_data:
         if (ce_int_status == (1 << ce_id)) {
@@ -1422,7 +1442,8 @@ more_data:
                  */
                 paddr_lo = QDF_NBUF_CB_PADDR(nbuf);
 
-                OS_SYNC_SINGLE_FOR_CPU(scn->qdf_dev->dev, paddr_lo,
+                qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev,
+                                        paddr_lo,
                                         (skb_end_pointer(nbuf) - (nbuf)->data),
                                         DMA_FROM_DEVICE);
                 qdf_nbuf_put_tail(nbuf, nbytes);
@@ -1462,31 +1483,24 @@ more_data:
                 CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
                                            HOST_IS_COPY_COMPLETE_MASK);
         }
-        ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
-        if (ce_int_status & CE_WATERMARK_MASK)
-                goto more_data;
-
-        return QDF_STATUS_SUCCESS;
+        if (ce_recv_entries_done_nolock(scn, ce_state)) {
+                if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
+                        goto more_data;
+                } else {
+                        HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
+                                  __func__, nentries_mask,
+                                  ce_state->dest_ring->sw_index,
+                                  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
+                }
+        }
 }
 
 #else
-static int
-ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
+static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
 {
-        return QDF_STATUS_E_FAILURE;
 }
 #endif /* WLAN_FEATURE_FASTPATH */
 
-/*
- * Number of times to check for any pending tx/rx completion on
- * a copy engine, this count should be big enough. Once we hit
- * this threashold we'll not check for any Tx/Rx comlpetion in same
- * interrupt handling. Note that this threashold is only used for
- * Rx interrupt processing, this can be used tor Tx as well if we
- * suspect any infinite loop in checking for pending Tx completion.
- */
-#define CE_TXRX_COMP_CHECK_THRESHOLD 20
-
 /*
  * Guts of interrupt handler for per-engine interrupts on a particular CE.
  *
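Reviewer note: this hunk implements item 2) of the commit message. After draining a batch, the handler re-checks for newly arrived Rx descriptors and loops back at most CE_TXRX_COMP_CHECK_THRESHOLD times before deferring to the next interrupt. A self-contained sketch of that control shape; process_rx_batch() and more_rx_pending() are stand-ins for the real reap code and ce_recv_entries_done_nolock():

/* Stand-ins so the sketch compiles on its own. */
static int pending_batches = 3;          /* pretend 3 batches arrive */
static void process_rx_batch(void) { }
static int more_rx_pending(void) { return --pending_batches > 0; }

#define CE_TXRX_COMP_CHECK_THRESHOLD 20

static void service_fast_shape(void)
{
        unsigned int more_comp_cnt = 0;

more_data:
        process_rx_batch();

        if (more_rx_pending()) {
                if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
                        goto more_data;
                /* else: log the potential infinite loop and return,
                 * deferring the remainder to the next interrupt. */
        }
}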
@@ -1524,11 +1538,11 @@ int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
          * With below check we make sure CE we are handling is datapath CE and
          * fastpath is enabled.
          */
-        if (ce_is_fastpath_handler_registered(CE_state))
+        if (ce_is_fastpath_handler_registered(CE_state)) {
                 /* For datapath only Rx CEs */
-                if (!ce_per_engine_service_fast(scn, CE_id)) {
-                        qdf_spin_unlock(&CE_state->ce_index_lock);
-                        return 0;
-                }
+                ce_per_engine_service_fast(scn, CE_id);
+                qdf_spin_unlock(&CE_state->ce_index_lock);
+                return CE_state->receive_count;
+        }
 
         /* Clear force_break flag and re-initialize receive_count to 0 */
@@ -184,16 +184,4 @@ static inline unsigned char *os_malloc(osdev_t nic_dev,
 #define SET_NETDEV_DEV(ndev, pdev)
 #endif
 
-#define OS_SYNC_SINGLE_FOR_CPU(pdev, paddr_lo, len, oprn) \
-{ \
-        dma_sync_single_for_cpu(pdev, paddr_lo, len, oprn); \
-}
-
-#define OS_SYNC_SINGLE_FOR_DEVICE(pdev, paddr_lo, len, oprn) \
-{ \
-        dma_sync_single_for_device(pdev, paddr_lo, len, oprn);\
-}
-
-#define SLOTS_PER_TX 2
-
 #endif /* end of _OSDEP_H */
@@ -276,6 +276,10 @@ void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
                                         qdf_size_t size,
                                         __dma_data_direction direction);
 
+void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
+                                     qdf_dma_addr_t bus_addr,
+                                     qdf_size_t size,
+                                     __dma_data_direction direction);
 /**
  * qdf_str_len() - returns the length of a string
  * @str: input string
@@ -429,8 +429,7 @@ static inline void qdf_nbuf_free(qdf_nbuf_t buf)
  * put, or NULL if there is not enough room in this buf.
  */
 
-static inline void
-qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
+static inline void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
 {
         atomic_set(&nbuf->users, 1);
         nbuf->data = nbuf->head + NET_SKB_PAD;
@@ -935,15 +935,15 @@ EXPORT_SYMBOL(qdf_mem_free_consistent);
  * @osdev: OS device handle
  * @bus_addr: dma address to give to the device
  * @size: Size of the memory block
- * @direction: direction data will be dma'ed
+ * @direction: direction data will be DMAed
  *
  * Assign memory to the remote device.
  * The cache lines are flushed to ram or invalidated as needed.
  *
  * Return: none
  */
-inline void
-qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, qdf_dma_addr_t bus_addr,
+void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
+                                        qdf_dma_addr_t bus_addr,
                                         qdf_size_t size,
                                         enum dma_data_direction direction)
 {
@@ -951,3 +951,21 @@ qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, qdf_dma_addr_t bus_addr,
 }
 EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_device);
 
+/**
+ * qdf_mem_dma_sync_single_for_cpu() - assign memory to CPU
+ * @osdev: OS device handle
+ * @bus_addr: dma address to give to the cpu
+ * @size: Size of the memory block
+ * @direction: direction data will be DMAed
+ *
+ * Assign memory to the CPU.
+ *
+ * Return: none
+ */
+void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
+                                     qdf_dma_addr_t bus_addr,
+                                     qdf_size_t size,
+                                     enum dma_data_direction direction)
+{
+        dma_sync_single_for_cpu(osdev->dev, bus_addr, size, direction);
+}