qcacmn: Add GRO support to NAPI+Rx_thread processing model

GRO aggregations hang off a NAPI instance, so the hif NAPI instance
cannot be used in the Rx thread: the gro_list inside that NAPI is
flushed at the start and end of each NAPI poll, which would corrupt the
gro_list the Rx thread is working on. Address this by creating a dummy
NAPI instance mapped to each hif NAPI and never scheduling it.

Change-Id: I517c4c6158ed3ac073f5f617afde46c7ed07ff3e
CRs-Fixed: 2128457
Este commit está contenido en:
Manjunathappa Prakash
2018-03-28 20:05:56 -07:00
cometido por nshrivas
padre 848fc04585
commit 56023f5649
Se han modificado 7 ficheros con 260 adiciones y 32 borrados

Ver fichero

@@ -191,8 +191,12 @@ struct qca_napi_info {
uint8_t cpu;
int irq;
struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
/* will only be present for data rx CE's */
void (*lro_flush_cb)(void *);
void (*offld_flush_cb)(void *);
struct napi_struct rx_thread_napi;
struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
qdf_lro_ctx_t lro_ctx;
};
@@ -653,6 +657,28 @@ int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
u32 *revision, const char **target_name);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: flush callback; ol_flush in case of rx_thread,
 * or the GRO/LRO flush when RxThread is not enabled. Called
 * with the corresponding context for flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
void (offld_flush_handler)(void *ol_ctx));
/**
* hif_offld_flush_cb_deregister() - deRegister the offld flush callback
* @scn: HIF opaque context
*
* Return: None
*/
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
@@ -828,11 +854,6 @@ int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
u32 *revision, const char **target_name);
void hif_lro_flush_cb_register(struct hif_opaque_softc *hif_ctx,
void (lro_flush_handler)(void *arg),
void *(lro_init_handler)(void));
void hif_lro_flush_cb_deregister(struct hif_opaque_softc *hif_ctx,
void (lro_deinit_cb)(void *arg));
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *

Ver fichero

@@ -173,8 +173,6 @@ static void ce_tasklet(unsigned long data)
ce_per_engine_service(scn, tasklet_entry->ce_id);
qdf_lro_flush(CE_state->lro_data);
if (ce_check_rx_pending(CE_state)) {
/*
* There are frames pending, schedule tasklet to process them.

Ver fichero

@@ -883,6 +883,25 @@ struct hif_target_info *hif_get_target_info_handle(
qdf_export_symbol(hif_get_target_info_handle);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register the Rx-offload flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: callback invoked to flush Rx-offload (GRO/LRO) state
 *
 * Forwards the registration to the NAPI layer; logs an error when NAPI
 * is not enabled, since the callback can only hang off NAPI instances.
 *
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
/**
 * hif_offld_flush_cb_deregister() - deregister the Rx-offload flush callback
 * @scn: HIF opaque context
 *
 * Forwards the deregistration to the NAPI layer; logs an error when NAPI
 * is not enabled, mirroring hif_offld_flush_cb_register().
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
if (hif_napi_enabled(hif_hdl, -1))
@@ -890,12 +909,12 @@ int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
else
return ctx_id;
}
#else
#else /* RECEIVE_OFFLOAD */
/**
 * hif_get_rx_ctx_id() - Rx context id for a copy-engine context id
 * @ctx_id: copy-engine context id
 * @hif_hdl: HIF opaque context (unused in this stub)
 *
 * Stub used when RECEIVE_OFFLOAD is not defined.
 * NOTE(review): this build always returns 0, whereas the RECEIVE_OFFLOAD
 * build falls back to returning @ctx_id when NAPI is disabled — confirm
 * callers expect 0 here rather than the pass-through value.
 *
 * Return: 0
 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
return 0;
}
#endif
#endif /* RECEIVE_OFFLOAD */
#if defined(FEATURE_LRO)

Ver fichero

@@ -63,6 +63,40 @@ enum napi_decision_vector {
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
#ifdef RECEIVE_OFFLOAD
/**
 * hif_rxthread_napi_poll() - placeholder poll handler for the Rx-thread NAPI
 * @napi: the Rx-thread napi_struct
 * @budget: NAPI poll budget (unused)
 *
 * The Rx-thread NAPI instance is never scheduled, so this handler must
 * never run; it exists only to satisfy netif_napi_add(). Reaching it
 * indicates a scheduling bug, hence the assert.
 *
 * Return: 0, as no packets are ever processed here.
 */
static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
{
	HIF_ERROR("This napi_poll should not be polled as we don't schedule it");
	QDF_ASSERT(0);

	return 0;
}
/**
* hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
* @napii: Handle to napi_info holding rx_thread napi
*
* Return: None
*/
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
init_dummy_netdev(&napii->rx_thread_netdev);
netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
hif_rxthread_napi_poll, 64);
napi_enable(&napii->rx_thread_napi);
}
#else /* RECEIVE_OFFLOAD */
/* Rx offload disabled: no dummy Rx-thread NAPI is needed */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
}
#endif /* RECEIVE_OFFLOAD */
/**
* hif_napi_create() - creates the NAPI structures for a given CE
* @hif : pointer to hif context
@@ -177,6 +211,7 @@ int hif_napi_create(struct hif_opaque_softc *hif_ctx,
napii->netdev.napi_list.prev,
napii->netdev.napi_list.next);
hif_init_rx_thread_napi(napii);
napii->lro_ctx = qdf_lro_init();
NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
i, napii->id, napii->lro_ctx);
@@ -213,6 +248,62 @@ hnc_err:
}
qdf_export_symbol(hif_napi_create);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_rx_offld_flush_cb_register() - Register flush callback for Rx offld
 * @hif_hdl: pointer to hif context
 * @offld_flush_handler: Rx-offload (GRO/LRO) flush callback to store
 *
 * Stores the callback in every NAPI instance that serves an HTT Rx data CE.
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
					 void (offld_flush_handler)(void *))
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		/* only HTT Rx data CEs carry an offload flush callback */
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			napii->offld_flush_cb = offld_flush_handler;
			/* %pK (not %p): avoid leaking raw kernel pointers in
			 * logs; also matches the deregister path's format.
			 */
			HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
		}
	}
}
/**
 * hif_napi_rx_offld_flush_cb_deregister() - Deregister the offld flush_cb
 * @hif_hdl: pointer to hif context
 *
 * Clears the Rx-offload flush callback from every NAPI instance that
 * serves an HTT Rx data CE.
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct CE_state *ce_state;
	int ce_id;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		if (!ce_state || !ce_state->htt_rx_data)
			continue;

		napii = napid->napis[ce_id];
		HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n",
			ce_id, napii->id, napii->offld_flush_cb);
		/* drop the callback; no flush required on deregister */
		napii->offld_flush_cb = NULL;
	}
}
#endif /* RECEIVE_OFFLOAD */
/**
*
* hif_napi_destroy() - destroys the NAPI structures for a given instance
@@ -318,17 +409,7 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
}
qdf_export_symbol(hif_napi_destroy);
/**
* hif_napi_get_lro_info() - returns the address LRO data for napi_id
* @hif: pointer to hif context
* @napi_id: napi instance
*
* Description:
* Returns the address of the LRO structure
*
* Return:
* <addr>: address of the LRO structure
*/
#ifdef FEATURE_LRO
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
@@ -342,6 +423,7 @@ void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
return napii->lro_ctx;
return 0;
}
#endif
/**
*
@@ -361,11 +443,11 @@ inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
return &(hif->napi_data);
}
struct napi_struct *hif_get_napi(int napi_id, struct qca_napi_data *napid)
struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
{
int id = NAPI_ID2PIPE(napi_id);
return &(napid->napis[id]->napi);
return napid->napis[id];
}
/**
@@ -730,6 +812,24 @@ bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
return right_cpu;
}
#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_offld_flush_cb() - invoke the registered Rx-offload flush callback
 * @napi_info: hif napi instance; passed through as the callback context
 *
 * Return: None
 */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
	void (*flush)(void *) = napi_info->offld_flush_cb;

	if (flush)
		flush(napi_info);
}
#else
/* Rx offload disabled: flushing is a no-op */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
}
#endif
/**
* hif_napi_poll() - NAPI poll routine
* @napi : pointer to NAPI struct as kernel holds it
@@ -785,7 +885,7 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
__func__, rc);
qdf_lro_flush(napi_info->lro_ctx);
hif_napi_offld_flush_cb(napi_info);
/* do not return 0, if there was some work done,
* even if it is below the scale
@@ -834,8 +934,8 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
if (normalized >= budget)
normalized = budget - 1;
/* enable interrupts */
napi_complete(napi);
/* enable interrupts */
hif_napi_enable_irq(hif_ctx, napi_info->id);
/* support suspend/resume */
qdf_atomic_dec(&(hif->active_tasklet_cnt));

Ver fichero

@@ -96,6 +96,37 @@ enum qca_napi_event {
#define NAPI_ID2PIPE(i) ((i)-1)
#define NAPI_PIPE2ID(p) ((p)+1)
#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_rx_offld_flush_cb_register() - Register flush callback for Rx offld
 * @hif_hdl: pointer to hif context
 * @rx_ol_flush_handler: Rx offload flush callback to register
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
void (rx_ol_flush_handler)(void *arg));
/**
 * hif_napi_rx_offld_flush_cb_deregister() - Deregister the offld flush_cb
 * @hif_hdl: pointer to hif context
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl);
#endif /* RECEIVE_OFFLOAD */
/**
* hif_napi_get_lro_info() - returns the address LRO data for napi_id
* @hif: pointer to hif context
* @napi_id: napi instance
*
* Description:
* Returns the address of the LRO structure
*
* Return:
* <addr>: address of the LRO structure
*/
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id);
enum qca_blacklist_op {
@@ -127,11 +158,11 @@ struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif);
/**
* hif_get_napi() - get NAPI corresponding to napi_id
* @napi_id: NAPI instance
* @napi_d: Handle NAPI
* @napid: Handle NAPI
*
* Return: napi corresponding napi_id
*/
struct napi_struct *hif_get_napi(int napi_id, struct qca_napi_data *napid);
struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid);
int hif_napi_event(struct hif_opaque_softc *hif,
enum qca_napi_event event,