Merge "qcacld-3.0: Flush only NAPI listified_rx_normal in low TPUT" into wlan-cld3.driver.lnx.2.0

This commit is contained in:
CNSS_WLAN Service
2020-09-22 14:47:07 -07:00
committed by Gerrit - the friendly Code Review server
5 changed files with 68 additions and 31 deletions

View file

@@ -315,7 +315,17 @@ enq_done:
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread)
/**
* dp_rx_tm_thread_gro_flush_ind() - post GRO flush indication to an Rx thread
* @rx_thread: rx_thread in which the flush needs to be handled
* @flush_code: flush code to differentiate low TPUT flush
*
* Return: QDF_STATUS_SUCCESS on success or qdf error code on
* failure
*/
static QDF_STATUS
dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread,
enum dp_rx_gro_flush_code flush_code)
{
struct dp_rx_tm_handle_cmn *tm_handle_cmn;
qdf_wait_queue_head_t *wait_q_ptr;
@@ -323,7 +333,7 @@ static QDF_STATUS dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread)
tm_handle_cmn = rx_thread->rtm_handle_cmn;
wait_q_ptr = &rx_thread->wait_q;
qdf_atomic_set(&rx_thread->gro_flush_ind, 1);
qdf_atomic_set(&rx_thread->gro_flush_ind, flush_code);
dp_debug("Flush indication received");
@@ -430,16 +440,18 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
/**
* dp_rx_thread_gro_flush() - flush GRO packets for the RX thread
* @rx_thread - rx_thread to be processed
* @rx_thread: rx_thread to be processed
* @gro_flush_code: flush code to differentiate flushes
*
* Returns: void
* Return: void
*/
static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread)
static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread,
enum dp_rx_gro_flush_code gro_flush_code)
{
dp_debug("flushing packets for thread %u", rx_thread->id);
local_bh_disable();
dp_rx_napi_gro_flush(&rx_thread->napi);
dp_rx_napi_gro_flush(&rx_thread->napi, gro_flush_code);
local_bh_enable();
rx_thread->stats.gro_flushes++;
@@ -459,6 +471,8 @@ static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread)
*/
static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
{
enum dp_rx_gro_flush_code gro_flush_code;
while (true) {
if (qdf_atomic_test_and_clear_bit(RX_SHUTDOWN_EVENT,
&rx_thread->event_flag)) {
@@ -475,10 +489,12 @@ static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
dp_rx_thread_process_nbufq(rx_thread);
if (qdf_atomic_read(&rx_thread->gro_flush_ind) |
gro_flush_code = qdf_atomic_read(&rx_thread->gro_flush_ind);
if (gro_flush_code ||
qdf_atomic_test_bit(RX_VDEV_DEL_EVENT,
&rx_thread->event_flag)) {
dp_rx_thread_gro_flush(rx_thread);
dp_rx_thread_gro_flush(rx_thread, gro_flush_code);
qdf_atomic_set(&rx_thread->gro_flush_ind, 0);
}
@@ -1006,12 +1022,14 @@ QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
}
QDF_STATUS
dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_hdl, int rx_ctx_id)
dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_hdl, int rx_ctx_id,
enum dp_rx_gro_flush_code flush_code)
{
uint8_t selected_thread_id;
selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, rx_ctx_id);
dp_rx_tm_thread_gro_flush_ind(rx_tm_hdl->rx_thread[selected_thread_id]);
dp_rx_tm_thread_gro_flush_ind(rx_tm_hdl->rx_thread[selected_thread_id],
flush_code);
return QDF_STATUS_SUCCESS;
}
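
The hunks above turn rx_thread->gro_flush_ind from a 0/1 flag into a carrier for the flush code: dp_rx_tm_thread_gro_flush_ind() stores the enum value, and the sub-loop reads it, flushes with that code, then writes 0 back. A minimal standalone sketch of that producer/consumer shape, with C11 atomics standing in for the qdf_atomic_* wrappers (everything except the enum values is a hypothetical stand-in):

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the driver's enum: 0 doubles as "no flush pending". */
enum dp_rx_gro_flush_code {
	DP_RX_GRO_NOT_FLUSH,
	DP_RX_GRO_NORMAL_FLUSH,
	DP_RX_GRO_LOW_TPUT_FLUSH
};

static atomic_int gro_flush_ind;	/* stand-in for rx_thread->gro_flush_ind */

/* Producer side: post a flush request with its code. */
static void post_flush(enum dp_rx_gro_flush_code code)
{
	atomic_store(&gro_flush_ind, code);
}

/* Consumer side: one pass of the RX thread sub-loop. */
static void rx_thread_pass(void)
{
	enum dp_rx_gro_flush_code code = atomic_load(&gro_flush_ind);

	if (code) {
		/* dp_rx_thread_gro_flush(rx_thread, code) would run here. */
		printf("flushing with code %d\n", code);
		atomic_store(&gro_flush_ind, DP_RX_GRO_NOT_FLUSH);
	}
}

int main(void)
{
	post_flush(DP_RX_GRO_LOW_TPUT_FLUSH);
	rx_thread_pass();	/* prints: flushing with code 2 */
	rx_thread_pass();	/* nothing pending, nothing printed */
	return 0;
}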

View file

@@ -23,6 +23,7 @@
#include <qdf_event.h>
#include <qdf_threads.h>
#include <wlan_objmgr_vdev_obj.h>
/* Maximum number of REO rings supported (for stats tracking) */
#define DP_RX_TM_MAX_REO_RINGS 4
@@ -139,6 +140,18 @@ struct dp_rx_tm_handle {
qdf_atomic_t allow_dropping;
};
/**
* enum dp_rx_gro_flush_code - enum to differentiate GRO flush types
* @DP_RX_GRO_NOT_FLUSH: no flush indication
* @DP_RX_GRO_NORMAL_FLUSH: Regular full flush
* @DP_RX_GRO_LOW_TPUT_FLUSH: Flush during low tput level
*/
enum dp_rx_gro_flush_code {
DP_RX_GRO_NOT_FLUSH,
DP_RX_GRO_NORMAL_FLUSH,
DP_RX_GRO_LOW_TPUT_FLUSH
};
/**
* dp_rx_tm_init() - initialize DP Rx thread infrastructure
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
@@ -171,11 +184,13 @@ QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
* dp_rx_tm_gro_flush_ind() - flush GRO packets for a RX Context Id
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
* @rx_ctx_id: RX Thread Context Id for which GRO flush needs to be done
* @flush_code: flush code to differentiate low TPUT flush
*
* Return: QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_handle,
int rx_ctx_id);
int rx_ctx_id,
enum dp_rx_gro_flush_code flush_code);
/**
* dp_rx_tm_suspend() - suspend all threads in RXTI

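One detail worth calling out in the new enum: DP_RX_GRO_NOT_FLUSH sits first, so it is 0 and keeps meaning "nothing pending", while DP_RX_GRO_NORMAL_FLUSH is 1, the same value the old code wrote with qdf_atomic_set(&rx_thread->gro_flush_ind, 1). That ordering is what lets the existing non-zero checks keep working. A tiny compile-time check of that assumption (standalone C11, not part of the driver):

#include <assert.h>

enum dp_rx_gro_flush_code {
	DP_RX_GRO_NOT_FLUSH,		/* 0: no flush pending */
	DP_RX_GRO_NORMAL_FLUSH,		/* 1: full flush, matches the old boolean 1 */
	DP_RX_GRO_LOW_TPUT_FLUSH	/* 2: flush only the listified rx_normal packets */
};

/* If either of these ever failed, plain "if (gro_flush_ind)" checks and the
 * old set-to-1 call sites would silently change meaning. */
static_assert(DP_RX_GRO_NOT_FLUSH == 0, "0 must still mean 'no flush'");
static_assert(DP_RX_GRO_NORMAL_FLUSH == 1, "1 must still mean a full flush");

int main(void) { return 0; }
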
View file

@@ -50,16 +50,21 @@ struct dp_txrx_handle {
/**
* dp_rx_napi_gro_flush() - do gro flush
* @napi: napi used to do gro flush
* @flush_code: flush_code differentiating low_tput_flush and normal_flush
*
* if there are RX GRO_NORMAL packets pending in the napi
* rx_list, flush them manually right after napi_gro_flush.
*
* Return: none
*/
static inline void dp_rx_napi_gro_flush(struct napi_struct *napi)
static inline void dp_rx_napi_gro_flush(struct napi_struct *napi,
enum dp_rx_gro_flush_code flush_code)
{
if (napi->poll) {
napi_gro_flush(napi, false);
/* Skipping GRO flush in low TPUT */
if (flush_code != DP_RX_GRO_LOW_TPUT_FLUSH)
napi_gro_flush(napi, false);
if (napi->rx_count) {
netif_receive_skb_list(&napi->rx_list);
qdf_init_list_head(&napi->rx_list);
@@ -68,7 +73,7 @@ static inline void dp_rx_napi_gro_flush(struct napi_struct *napi)
}
}
#else
#define dp_rx_napi_gro_flush(_napi) napi_gro_flush((_napi), false)
#define dp_rx_napi_gro_flush(_napi, flush_code) napi_gro_flush((_napi), false)
#endif
#ifdef FEATURE_WLAN_DP_RX_THREADS
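
This is the behavioural core of the commit for kernels with listified GRO: a low-TPUT flush skips napi_gro_flush(), so partially merged GRO flows stay held, but any GRO_NORMAL packets parked on napi->rx_list are still handed to the stack. A standalone sketch of that branch, with struct napi_struct and the kernel helpers replaced by stubs so it builds on its own:

#include <stdio.h>

enum dp_rx_gro_flush_code {
	DP_RX_GRO_NOT_FLUSH,
	DP_RX_GRO_NORMAL_FLUSH,
	DP_RX_GRO_LOW_TPUT_FLUSH
};

/* Stand-ins for struct napi_struct and the kernel helpers. */
struct fake_napi {
	int rx_count;		/* packets queued on rx_list (GRO_NORMAL) */
	int gro_count;		/* flows held for GRO merging */
};

static void fake_napi_gro_flush(struct fake_napi *napi)
{
	printf("napi_gro_flush: pushing %d merged flows\n", napi->gro_count);
	napi->gro_count = 0;
}

static void fake_receive_skb_list(struct fake_napi *napi)
{
	printf("netif_receive_skb_list: delivering %d rx_list packets\n",
	       napi->rx_count);
	napi->rx_count = 0;
}

/* Mirrors dp_rx_napi_gro_flush(): skip the GRO flush for low TPUT,
 * always drain the listified rx_normal packets. */
static void gro_flush(struct fake_napi *napi, enum dp_rx_gro_flush_code code)
{
	if (code != DP_RX_GRO_LOW_TPUT_FLUSH)
		fake_napi_gro_flush(napi);
	if (napi->rx_count)
		fake_receive_skb_list(napi);
}

int main(void)
{
	struct fake_napi napi = { .rx_count = 3, .gro_count = 2 };

	gro_flush(&napi, DP_RX_GRO_LOW_TPUT_FLUSH);	/* only rx_list drained */
	napi.rx_count = 1;
	napi.gro_count = 4;
	gro_flush(&napi, DP_RX_GRO_NORMAL_FLUSH);	/* both drained */
	return 0;
}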
@@ -257,11 +262,13 @@ ret:
* dp_rx_gro_flush_ind() - Flush GRO packets for a given RX CTX Id
* @soc: ol_txrx_soc_handle object
* @rx_ctx_id: Context Id (Thread for which GRO packets need to be flushed)
* @flush_code: flush_code differentiating normal_flush from low_tput_flush
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
static inline
QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id)
QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id,
enum dp_rx_gro_flush_code flush_code)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
@@ -278,7 +285,8 @@ QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id)
goto ret;
}
qdf_status = dp_rx_tm_gro_flush_ind(&dp_ext_hdl->rx_tm_hdl, rx_ctx_id);
qdf_status = dp_rx_tm_gro_flush_ind(&dp_ext_hdl->rx_tm_hdl, rx_ctx_id,
flush_code);
ret:
return qdf_status;
}
@@ -409,7 +417,8 @@ QDF_STATUS dp_rx_enqueue_pkt(ol_txrx_soc_handle soc, qdf_nbuf_t nbuf_list)
}
static inline
QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id)
QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id,
enum dp_rx_gro_flush_code flush_code)
{
return QDF_STATUS_SUCCESS;
}
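
The stub above is the other half of the compile-time split: with FEATURE_WLAN_DP_RX_THREADS the indication is forwarded to the RX thread manager, without it the same signature just reports success, so callers never need to know which variant was built. A small self-contained illustration of that pattern (the build flag and helper names here are hypothetical):

#include <stdio.h>

typedef int QDF_STATUS;			/* simplified stand-in for the real type */
#define QDF_STATUS_SUCCESS 0

enum dp_rx_gro_flush_code {
	DP_RX_GRO_NOT_FLUSH,
	DP_RX_GRO_NORMAL_FLUSH,
	DP_RX_GRO_LOW_TPUT_FLUSH
};

#ifdef FEATURE_RX_THREADS_DEMO		/* hypothetical build flag */
static QDF_STATUS gro_flush_ind(int rx_ctx_id, enum dp_rx_gro_flush_code code)
{
	/* Threads enabled: hand the request to the RX thread for this context. */
	printf("forwarding flush code %d to rx thread %d\n", code, rx_ctx_id);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS gro_flush_ind(int rx_ctx_id, enum dp_rx_gro_flush_code code)
{
	/* Threads disabled: same signature, no work, caller still gets SUCCESS. */
	(void)rx_ctx_id;
	(void)code;
	return QDF_STATUS_SUCCESS;
}
#endif

int main(void)
{
	return gro_flush_ind(0, DP_RX_GRO_LOW_TPUT_FLUSH);
}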

View file

@@ -9595,16 +9595,8 @@ static inline void hdd_pm_qos_update_request(struct hdd_context *hdd_ctx,
}
#endif /* CLD_PM_QOS */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
static inline void hdd_low_tput_gro_flush_skip_check(
struct hdd_context *hdd_ctx,
enum pld_bus_width_type next_vote_level)
{
}
#else
/**
* hdd_low_tput_gro_flush_skip_check() - check GRO flush skip condition
*
* @hdd_ctx: handle to hdd context
* @next_vote_level: next bus bandwidth level
*
@@ -9628,7 +9620,6 @@ static inline void hdd_low_tput_gro_flush_skip_check(
qdf_atomic_set(&hdd_ctx->low_tput_gro_enable, 0);
}
}
#endif
/**
* hdd_pld_request_bus_bandwidth() - Function to control bus bandwidth

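The hunk above removes the kernel-version split, so hdd_low_tput_gro_flush_skip_check() now runs on every kernel instead of being stubbed out for 5.4 and later. Its visible tail clears hdd_ctx->low_tput_gro_enable; the enabling side is not shown, so the sketch below only assumes the general shape — set the atomic flag while the next bus-bandwidth vote is at a low level, clear it otherwise (the threshold and any hysteresis are assumptions, not taken from the patch):

#include <stdatomic.h>
#include <stdbool.h>

/* Assumed subset of the pld bus-width levels; the real enum is larger. */
enum bus_width { BUS_WIDTH_IDLE, BUS_WIDTH_LOW, BUS_WIDTH_MEDIUM, BUS_WIDTH_HIGH };

static atomic_int low_tput_gro_enable;	/* stand-in for hdd_ctx->low_tput_gro_enable */

/* Hypothetical equivalent of hdd_low_tput_gro_flush_skip_check():
 * flag low-TPUT mode while the vote stays low, clear it otherwise. */
static void low_tput_skip_check(enum bus_width next_vote_level)
{
	if (next_vote_level <= BUS_WIDTH_LOW)
		atomic_store(&low_tput_gro_enable, 1);
	else
		atomic_store(&low_tput_gro_enable, 0);
}

static bool is_low_tput_gro_enable(void)
{
	return atomic_load(&low_tput_gro_enable) != 0;
}

int main(void)
{
	low_tput_skip_check(BUS_WIDTH_LOW);	/* flag set   -> GRO flush skipped */
	low_tput_skip_check(BUS_WIDTH_HIGH);	/* flag clear -> normal flush      */
	return is_low_tput_gro_enable();	/* 0 */
}
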
View file

@@ -1666,7 +1666,8 @@ static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
if (HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(gro_ret)) {
adapter->hdd_stats.tx_rx_stats.
rx_gro_low_tput_flush++;
dp_rx_napi_gro_flush(napi_to_use);
dp_rx_napi_gro_flush(napi_to_use,
DP_RX_GRO_NORMAL_FLUSH);
}
if (!rx_aggregation)
hdd_ctx->dp_agg_param.gro_force_flush[rx_ctx_id] = 1;
@@ -1712,7 +1713,8 @@ static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE) {
if (HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(gro_ret)) {
adapter->hdd_stats.tx_rx_stats.rx_gro_low_tput_flush++;
dp_rx_napi_gro_flush(napi_to_use);
dp_rx_napi_gro_flush(napi_to_use,
DP_RX_GRO_NORMAL_FLUSH);
}
}
local_bh_enable();
@@ -1821,7 +1823,8 @@ static void hdd_rxthread_napi_gro_flush(void *data)
* As we are breaking context in Rxthread mode, there is an rx_thread NAPI
* corresponding to each hif_napi.
*/
dp_rx_napi_gro_flush(&qca_napii->rx_thread_napi);
dp_rx_napi_gro_flush(&qca_napii->rx_thread_napi,
DP_RX_GRO_NORMAL_FLUSH);
local_bh_enable();
}
@@ -2050,6 +2053,7 @@ static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id)
{
struct hdd_adapter *hdd_adapter = adapter;
enum dp_rx_gro_flush_code gro_flush_code = DP_RX_GRO_NORMAL_FLUSH;
if (qdf_unlikely((!hdd_adapter) || (!hdd_adapter->hdd_ctx))) {
hdd_err("Null params being passed");
@@ -2058,11 +2062,11 @@ QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id)
if (hdd_is_low_tput_gro_enable(hdd_adapter->hdd_ctx)) {
hdd_adapter->hdd_stats.tx_rx_stats.rx_gro_flush_skip++;
return QDF_STATUS_SUCCESS;
gro_flush_code = DP_RX_GRO_LOW_TPUT_FLUSH;
}
return dp_rx_gro_flush_ind(cds_get_context(QDF_MODULE_ID_SOC),
rx_ctx_id);
rx_ctx_id, gro_flush_code);
}
QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter,
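
Taken together, the callback above is where the flush code is chosen, and from there it rides the existing indication path down to the per-thread NAPI flush. A compact, compilable outline of that routing, with every driver function reduced to a stub (the comments name the real functions each stub stands for):

#include <stdio.h>

enum dp_rx_gro_flush_code {
	DP_RX_GRO_NOT_FLUSH,
	DP_RX_GRO_NORMAL_FLUSH,
	DP_RX_GRO_LOW_TPUT_FLUSH
};

static int low_tput;	/* stand-in for hdd_is_low_tput_gro_enable(hdd_ctx) */

/* dp_rx_napi_gro_flush(): skip napi_gro_flush() for low TPUT, drain rx_list. */
static void napi_flush(enum dp_rx_gro_flush_code code)
{
	if (code != DP_RX_GRO_LOW_TPUT_FLUSH)
		printf("napi_gro_flush\n");
	printf("netif_receive_skb_list\n");
}

/* dp_rx_gro_flush_ind() -> dp_rx_tm_gro_flush_ind() ->
 * dp_rx_tm_thread_gro_flush_ind(): the code travels unchanged down the stack
 * and sits in the thread's gro_flush_ind atomic until the thread wakes. */
static void flush_ind(int rx_ctx_id, enum dp_rx_gro_flush_code code)
{
	(void)rx_ctx_id;
	napi_flush(code);	/* done later by dp_rx_thread_gro_flush() */
}

/* hdd_rx_thread_gro_flush_ind_cbk(): pick the code, then indicate. */
static void hdd_flush_cbk(int rx_ctx_id)
{
	enum dp_rx_gro_flush_code code = DP_RX_GRO_NORMAL_FLUSH;

	if (low_tput)
		code = DP_RX_GRO_LOW_TPUT_FLUSH;
	flush_ind(rx_ctx_id, code);
}

int main(void)
{
	low_tput = 1;
	hdd_flush_cbk(0);	/* only the listified rx_normal packets go up */
	return 0;
}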