qcacmn: Add GRO support to NAPI+Rx_thread processing model

GRO aggregations hang off a NAPI instance, so the hif NAPI instance cannot
be used from the Rx thread: the gro_list inside that NAPI is flushed at the
start and end of every NAPI poll, which would corrupt the gro_list the
Rx thread is still working on. Address this by creating a dummy NAPI
instance mapped to each hif NAPI and never scheduling it.

Change-Id: I517c4c6158ed3ac073f5f617afde46c7ed07ff3e
CRs-Fixed: 2128457
This commit is contained in:
Manjunathappa Prakash
2018-03-28 20:05:56 -07:00
committed by nshrivas
parent 848fc04585
commit 56023f5649
7 changed files with 260 additions and 32 deletions

View File

@@ -1566,4 +1566,49 @@ cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers)
soc->ops->cmn_drv_ops->txrx_peer_map_attach)
soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers);
}
#ifdef RECEIVE_OFFLOAD
/**
 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer
 * @soc: data path soc handle
 * @rx_ol_flush_cb: Rx offload (LRO/GRO) flush callback to register
 *
 * Register rx offload flush callback function pointer
 *
 * Return: None
 */
static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc,
						  void (rx_ol_flush_cb)(void *))
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/*
	 * The op returns void: call it plainly instead of "return <void
	 * expression>", which is a constraint violation in ISO C (6.8.6.4).
	 */
	if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb)
		soc->ops->rx_offld_ops->register_rx_offld_flush_cb(
								rx_ol_flush_cb);
}
/**
 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function
 * @soc: data path soc handle
 *
 * Deregister rx offload flush callback function pointer
 *
 * Return: None
 */
static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/*
	 * The op returns void: call it plainly instead of "return <void
	 * expression>", which is a constraint violation in ISO C (6.8.6.4).
	 */
	if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb)
		soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb();
}
#endif /* RECEIVE_OFFLOAD */
#endif /* _CDP_TXRX_CMN_H_ */

View File

@@ -685,11 +685,11 @@ struct cdp_pflow_ops {
#define LRO_IPV6_SEED_ARR_SZ 11
/**
* struct cdp_lro_config - set LRO init parameters
* @lro_enable: indicates whether lro is enabled
* struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
* @lro_enable: indicates whether rx_offld is enabled
* @tcp_flag: If the TCP flags from the packet do not match
* the values in this field after masking with TCP flags mask
* below, packet is not LRO eligible
* below, packet is not rx_offld eligible
* @tcp_flag_mask: field for comparing the TCP values provided
* above with the TCP flags field in the received packet
* @toeplitz_hash_ipv4: contains seed needed to compute the flow id
@@ -727,7 +727,7 @@ struct ol_if_ops {
void (*peer_del_wds_entry)(void *ol_soc_handle,
uint8_t *wds_macaddr);
QDF_STATUS (*lro_hash_config)(void *scn_handle,
struct cdp_lro_hash_config *lro_hash);
struct cdp_lro_hash_config *rx_offld_hash);
void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
uint8_t type);
uint8_t (*rx_invalid_peer)(void *osif_pdev, void *msg);
@@ -1088,6 +1088,18 @@ struct cdp_mob_stats_ops {
};
#endif /* CONFIG_WIN */
#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl receive offload ops
 * @register_rx_offld_flush_cb: install the Rx offload (LRO/GRO) flush
 *                              callback in the data path
 * @deregister_rx_offld_flush_cb: remove the previously installed Rx
 *                                offload flush callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif
struct cdp_ops {
struct cdp_cmn_ops *cmn_drv_ops;
struct cdp_ctrl_ops *ctrl_ops;
@@ -1104,6 +1116,9 @@ struct cdp_ops {
struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
struct cdp_rx_offld_ops *rx_offld_ops;
#endif
struct cdp_bus_ops *bus_ops;
struct cdp_ocb_ops *ocb_ops;
@@ -1114,5 +1129,4 @@ struct cdp_ops {
struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
#endif

View File

@@ -191,8 +191,12 @@ struct qca_napi_info {
uint8_t cpu;
int irq;
struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
/* will only be present for data rx CE's */
void (*lro_flush_cb)(void *);
void (*offld_flush_cb)(void *);
struct napi_struct rx_thread_napi;
struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
qdf_lro_ctx_t lro_ctx;
};
@@ -653,6 +657,28 @@ int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
u32 *revision, const char **target_name);
#ifdef RECEIVE_OFFLOAD
/**
* hif_offld_flush_cb_register() - Register the offld flush callback
* @scn: HIF opaque context
 * @offld_flush_handler: Flush callback is either ol_flush, in case of
 *                       rx_thread, or the GRO/LRO flush when RxThread is not
 *                       enabled. Called with the corresponding context.
 * Return: None
* Return: None
*/
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
void (offld_flush_handler)(void *ol_ctx));
/**
* hif_offld_flush_cb_deregister() - deRegister the offld flush callback
* @scn: HIF opaque context
*
* Return: None
*/
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
@@ -828,11 +854,6 @@ int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
u32 *revision, const char **target_name);
void hif_lro_flush_cb_register(struct hif_opaque_softc *hif_ctx,
void (lro_flush_handler)(void *arg),
void *(lro_init_handler)(void));
void hif_lro_flush_cb_deregister(struct hif_opaque_softc *hif_ctx,
void (lro_deinit_cb)(void *arg));
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *

View File

@@ -173,8 +173,6 @@ static void ce_tasklet(unsigned long data)
ce_per_engine_service(scn, tasklet_entry->ce_id);
qdf_lro_flush(CE_state->lro_data);
if (ce_check_rx_pending(CE_state)) {
/*
* There are frames pending, schedule tasklet to process them.

View File

@@ -883,6 +883,25 @@ struct hif_target_info *hif_get_target_info_handle(
qdf_export_symbol(hif_get_target_info_handle);
#ifdef RECEIVE_OFFLOAD
/* Hand the Rx offload flush handler down to the per-CE NAPI layer */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
}
qdf_export_symbol(hif_offld_flush_cb_register);
/* Remove the Rx offload flush handler from the per-CE NAPI layer */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (!hif_napi_enabled(scn, -1)) {
		HIF_ERROR("NAPI not enabled\n");
		return;
	}

	hif_napi_rx_offld_flush_cb_deregister(scn);
}
qdf_export_symbol(hif_offld_flush_cb_deregister);
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
if (hif_napi_enabled(hif_hdl, -1))
@@ -890,12 +909,12 @@ int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
else
return ctx_id;
}
#else
#else /* RECEIVE_OFFLOAD */
/* RECEIVE_OFFLOAD disabled: all rx traffic maps to context 0 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
#endif
#endif /* RECEIVE_OFFLOAD */
#if defined(FEATURE_LRO)

View File

@@ -63,6 +63,40 @@ enum napi_decision_vector {
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
#ifdef RECEIVE_OFFLOAD
/**
 * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
 * @napi: Rx_thread NAPI (unused)
 * @budget: NAPI BUDGET (unused)
 *
 * The rx_thread NAPI exists only so GRO state can hang off it; it is
 * never scheduled, so this poll routine should never execute.
 *
 * Return: 0 as it is not supposed to be polled at all as it is not scheduled.
 */
static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
{
	/* Reaching here means the dummy NAPI got scheduled - that is a bug */
	HIF_ERROR("This napi_poll should not be polled as we don't schedule it");
	QDF_ASSERT(0);
	return 0;
}
/**
 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Creates a dummy netdev/NAPI pair so the Rx thread has its own gro_list
 * that is not flushed by the real hif NAPI poll.
 *
 * Return: None
 */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
	init_dummy_netdev(&napii->rx_thread_netdev);
	/* weight 64 appears to mirror the kernel default NAPI_POLL_WEIGHT;
	 * moot in practice since this NAPI is never scheduled
	 */
	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
		       hif_rxthread_napi_poll, 64);
	napi_enable(&napii->rx_thread_napi);
}
#else /* RECEIVE_OFFLOAD */
/* RECEIVE_OFFLOAD disabled: no rx_thread NAPI to set up */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
}
#endif
/**
* hif_napi_create() - creates the NAPI structures for a given CE
* @hif : pointer to hif context
@@ -177,6 +211,7 @@ int hif_napi_create(struct hif_opaque_softc *hif_ctx,
napii->netdev.napi_list.prev,
napii->netdev.napi_list.next);
hif_init_rx_thread_napi(napii);
napii->lro_ctx = qdf_lro_init();
NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
i, napii->id, napii->lro_ctx);
@@ -213,6 +248,62 @@ hnc_err:
}
qdf_export_symbol(hif_napi_create);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_rx_offld_flush_cb_register() - install flush cb on Rx data CEs
 * @hif_hdl: opaque hif handle
 * @offld_flush_handler: Rx offload (LRO/GRO) flush callback to install
 *
 * Stores @offld_flush_handler in the napi_info of every copy engine that
 * carries htt rx data, so hif_napi_poll() can flush pending aggregations.
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
					 void (offld_flush_handler)(void *))
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			napii->offld_flush_cb = offld_flush_handler;
			/* %pK (not %p) masks the kernel pointer per security
			 * policy, matching the deregister path
			 */
			HIF_DBG("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
		}
	}
}
/**
 * hif_napi_rx_offld_flush_cb_deregister() - clear flush cb on Rx data CEs
 * @hif_hdl: opaque hif handle
 *
 * Clears the Rx offload flush callback from the napi_info of every copy
 * engine that carries htt rx data.
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		HIF_ERROR("%s: hif_state NULL!", __func__);
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			HIF_DBG("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK\n",
				i, napii->id, napii->offld_flush_cb);
			/* NULL so hif_napi_offld_flush_cb() becomes a no-op */
			napii->offld_flush_cb = NULL;
		}
	}
}
#endif /* RECEIVE_OFFLOAD */
/**
*
* hif_napi_destroy() - destroys the NAPI structures for a given instance
@@ -318,17 +409,7 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
}
qdf_export_symbol(hif_napi_destroy);
/**
* hif_napi_get_lro_info() - returns the address LRO data for napi_id
* @hif: pointer to hif context
* @napi_id: napi instance
*
* Description:
* Returns the address of the LRO structure
*
* Return:
* <addr>: address of the LRO structure
*/
#ifdef FEATURE_LRO
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
@@ -342,6 +423,7 @@ void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
return napii->lro_ctx;
return 0;
}
#endif
/**
*
@@ -361,11 +443,11 @@ inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
return &(hif->napi_data);
}
struct napi_struct *hif_get_napi(int napi_id, struct qca_napi_data *napid)
struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
{
int id = NAPI_ID2PIPE(napi_id);
return &(napid->napis[id]->napi);
return napid->napis[id];
}
/**
@@ -730,6 +812,24 @@ bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
return right_cpu;
}
#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_offld_flush_cb() - Call upper layer flush callback
 * @napi_info: Handle to hif_napi_info
 *
 * Invokes the registered Rx offload flush callback, if any, passing the
 * napi_info as the flush context.
 *
 * Return: None
 */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
	void (*flush)(void *) = napi_info->offld_flush_cb;

	if (flush)
		flush(napi_info);
}
#else
/* RECEIVE_OFFLOAD disabled: no offload state to flush */
static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
{
}
#endif
/**
* hif_napi_poll() - NAPI poll routine
* @napi : pointer to NAPI struct as kernel holds it
@@ -785,7 +885,7 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
__func__, rc);
qdf_lro_flush(napi_info->lro_ctx);
hif_napi_offld_flush_cb(napi_info);
/* do not return 0, if there was some work done,
* even if it is below the scale
@@ -834,8 +934,8 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
if (normalized >= budget)
normalized = budget - 1;
/* enable interrupts */
napi_complete(napi);
/* enable interrupts */
hif_napi_enable_irq(hif_ctx, napi_info->id);
/* support suspend/resume */
qdf_atomic_dec(&(hif->active_tasklet_cnt));

View File

@@ -96,6 +96,37 @@ enum qca_napi_event {
#define NAPI_ID2PIPE(i) ((i)-1)
#define NAPI_PIPE2ID(p) ((p)+1)
#ifdef RECEIVE_OFFLOAD
/**
* hif_napi_rx_offld_flush_cb_register() - Register flush callback for Rx offld
* @hif_hdl: pointer to hif context
 * @rx_ol_flush_handler: Rx offload flush callback to register
*
* Return: None
*/
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
void (rx_ol_flush_handler)(void *arg));
/**
 * hif_napi_rx_offld_flush_cb_deregister() - Deregister offld flush_cb
* @hif_hdl: pointer to hif context
*
* Return: NONE
*/
void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl);
#endif /* RECEIVE_OFFLOAD */
/**
* hif_napi_get_lro_info() - returns the address LRO data for napi_id
* @hif: pointer to hif context
* @napi_id: napi instance
*
* Description:
* Returns the address of the LRO structure
*
* Return:
* <addr>: address of the LRO structure
*/
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id);
enum qca_blacklist_op {
@@ -127,11 +158,11 @@ struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif);
/**
* hif_get_napi() - get NAPI corresponding to napi_id
* @napi_id: NAPI instance
* @napi_d: Handle NAPI
* @napid: Handle NAPI
*
* Return: napi corresponding napi_id
*/
struct napi_struct *hif_get_napi(int napi_id, struct qca_napi_data *napid);
struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid);
int hif_napi_event(struct hif_opaque_softc *hif,
enum qca_napi_event event,