From 92f508a5eb7a0faf7a80e95d7918b7ce12f9cdf1 Mon Sep 17 00:00:00 2001 From: Jinwei Chen Date: Thu, 7 May 2020 20:03:51 +0800 Subject: [PATCH] qcacld-3.0: flush batched GRO_NORMAL packets Kernel 5.4 has applied batched GRO_NORMAL packets processing for all napi_gro_receive() users. This requires NAPI users to call napi_complete_done() or napi_complete() at the end of every polling to flush batched GRO_NORMAL packets. However, by current wlan driver design, napi_gro_receive() does not happen in the same NAPI polling context and is done in another thread context, so packets which have not been flushed from napi->rx_list are likely to stall until the next RX cycle. Fix this by adding a manual flushing of the list right after napi_gro_flush() call to mimic napi_complete() logic. Change-Id: Ib3e851c0822a85c4712a1b817cc19dfecf6d0e7a CRs-Fixed: 2673959 --- core/dp/txrx3.0/dp_rx_thread.c | 16 +++++++++++++++- core/dp/txrx3.0/dp_txrx.h | 17 ++++++++++++++++- core/hdd/src/wlan_hdd_tx_rx.c | 26 ++++++++++++++++++++------ 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/core/dp/txrx3.0/dp_rx_thread.c b/core/dp/txrx3.0/dp_rx_thread.c index 9edb7e921c..adb4df6096 100644 --- a/core/dp/txrx3.0/dp_rx_thread.c +++ b/core/dp/txrx3.0/dp_rx_thread.c @@ -352,7 +352,7 @@ static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread) dp_debug("flushing packets for thread %u", rx_thread->id); local_bh_disable(); - napi_gro_flush(&rx_thread->napi, false); + dp_rx_napi_gro_flush(&rx_thread->napi); local_bh_enable(); rx_thread->stats.gro_flushes++; @@ -953,3 +953,17 @@ QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl, } return QDF_STATUS_SUCCESS; } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +void dp_rx_napi_gro_flush(struct napi_struct *napi) +{ + if (napi->poll) { + napi_gro_flush(napi, false); + if (napi->rx_count) { + netif_receive_skb_list(&napi->rx_list); + qdf_init_list_head(&napi->rx_list); + napi->rx_count = 0; + } + } +} +#endif diff --git 
a/core/dp/txrx3.0/dp_txrx.h b/core/dp/txrx3.0/dp_txrx.h index bbe7ef5c8e..614c06169b 100644 --- a/core/dp/txrx3.0/dp_txrx.h +++ b/core/dp/txrx3.0/dp_txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -46,6 +46,21 @@ struct dp_txrx_handle { struct dp_txrx_config config; }; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +/** + * dp_rx_napi_gro_flush() - do gro flush + * @napi: napi used to do gro flush + * + * if there is RX GRO_NORMAL packets pending in napi + * rx_list, flush them manually right after napi_gro_flush. + * + * return: none + */ +void dp_rx_napi_gro_flush(struct napi_struct *napi); +#else +#define dp_rx_napi_gro_flush(_napi) napi_gro_flush((_napi), false) +#endif + #ifdef FEATURE_WLAN_DP_RX_THREADS /** * dp_txrx_get_cmn_hdl_frm_ext_hdl() - conversion func ext_hdl->txrx_handle_cmn diff --git a/core/hdd/src/wlan_hdd_tx_rx.c b/core/hdd/src/wlan_hdd_tx_rx.c index 6ca550e33d..ffd34117ec 100644 --- a/core/hdd/src/wlan_hdd_tx_rx.c +++ b/core/hdd/src/wlan_hdd_tx_rx.c @@ -1550,6 +1550,20 @@ static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx) } } +/** + * When bus bandwidth is idle, if RX data is delivered with + * napi_gro_receive, to reduce RX delay related with GRO, + * check gro_result returned from napi_gro_receive to determine + * is extra GRO flush still necessary. + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#define HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) \ + ((_gro_ret) != GRO_DROP) +#else +#define HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) \ + ((_gro_ret) != GRO_DROP && (_gro_ret) != GRO_NORMAL) +#endif + /** * hdd_gro_rx_bh_disable() - GRO RX/flush function. 
* @napi_to_use: napi to be used to give packets to the stack, gro flush @@ -1569,23 +1583,23 @@ static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter, { QDF_STATUS status = QDF_STATUS_SUCCESS; struct hdd_context *hdd_ctx = adapter->hdd_ctx; - gro_result_t gro_res; + gro_result_t gro_ret; skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4); local_bh_disable(); - gro_res = napi_gro_receive(napi_to_use, skb); + gro_ret = napi_gro_receive(napi_to_use, skb); if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE) { - if (gro_res != GRO_DROP && gro_res != GRO_NORMAL) { + if (HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(gro_ret)) { adapter->hdd_stats.tx_rx_stats. rx_gro_low_tput_flush++; - napi_gro_flush(napi_to_use, false); + dp_rx_napi_gro_flush(napi_to_use); } } local_bh_enable(); - if (gro_res == GRO_DROP) + if (gro_ret == GRO_DROP) status = QDF_STATUS_E_GRO_DROP; return status; @@ -1688,7 +1702,7 @@ static void hdd_rxthread_napi_gro_flush(void *data) * As we are breaking context in Rxthread mode, there is rx_thread NAPI * corresponds each hif_napi. */ - napi_gro_flush(&qca_napii->rx_thread_napi, false); + dp_rx_napi_gro_flush(&qca_napii->rx_thread_napi); local_bh_enable(); }