Browse Source

qcacld-3.0: flush batched GRO_NORMAL packets

  Kernel 5.4 has applied batched GRO_NORMAL packet processing
for all napi_gro_receive() users. This requires NAPI users to
call napi_complete_done() or napi_complete() at the end of every
polling cycle to flush batched GRO_NORMAL packets.
However, by the current wlan driver design, napi_gro_receive()
does not happen in the same NAPI polling context; it is done in
another thread context, so packets which have not been flushed
from napi->rx_list are likely to stall until the next RX cycle.

Fix this by adding a manual flush of the list right after the
napi_gro_flush() call to mimic the napi_complete() logic.

Change-Id: Id8c1b9360c380c9631c85f5e048ae2cd2798f65b
CRs-Fixed: 2673959
Jinwei Chen 4 years ago
parent
commit
3f4590be9a
3 changed files with 50 additions and 7 deletions
  1. 15 1
      core/dp/txrx3.0/dp_rx_thread.c
  2. 15 0
      core/dp/txrx3.0/dp_txrx.h
  3. 20 6
      core/hdd/src/wlan_hdd_tx_rx.c

+ 15 - 1
core/dp/txrx3.0/dp_rx_thread.c

@@ -352,7 +352,7 @@ static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread)
 	dp_debug("flushing packets for thread %u", rx_thread->id);
 
 	local_bh_disable();
-	napi_gro_flush(&rx_thread->napi, false);
+	dp_rx_napi_gro_flush(&rx_thread->napi);
 	local_bh_enable();
 
 	rx_thread->stats.gro_flushes++;
@@ -953,3 +953,17 @@ QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
 	}
 	return QDF_STATUS_SUCCESS;
 }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+/* Flush GRO-merged packets and then manually deliver any skbs still
+ * batched on napi->rx_list. Needed because this napi is polled from a
+ * thread context where napi_complete()/napi_complete_done() — which
+ * normally drain rx_list on kernels >= 5.4 — are never called.
+ */
+void dp_rx_napi_gro_flush(struct napi_struct *napi)
+{
+	if (napi->poll) {
+		napi_gro_flush(napi, false);
+		/* rx_count > 0 means GRO_NORMAL skbs were batched on
+		 * rx_list instead of being passed up the stack; deliver
+		 * them now and reset the list, mimicking napi_complete().
+		 */
+		if (napi->rx_count) {
+			netif_receive_skb_list(&napi->rx_list);
+			qdf_init_list_head(&napi->rx_list);
+			napi->rx_count = 0;
+		}
+	}
+}
+#endif

+ 15 - 0
core/dp/txrx3.0/dp_txrx.h

@@ -46,6 +46,21 @@ struct dp_txrx_handle {
 	struct dp_txrx_config config;
 };
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+/**
+ * dp_rx_napi_gro_flush() - do gro flush
+ * @napi: napi used to do gro flush
+ *
+ * If there are RX GRO_NORMAL packets pending on the napi
+ * rx_list, flush them manually right after napi_gro_flush().
+ *
+ * Return: none
+ */
+void dp_rx_napi_gro_flush(struct napi_struct *napi);
+#else
+/* Pre-5.4 kernels do not batch GRO_NORMAL skbs on napi->rx_list,
+ * so a plain napi_gro_flush() is sufficient.
+ */
+#define dp_rx_napi_gro_flush(_napi) napi_gro_flush((_napi), false)
+#endif
+
 #ifdef FEATURE_WLAN_DP_RX_THREADS
 /**
  * dp_txrx_get_cmn_hdl_frm_ext_hdl() - conversion func ext_hdl->txrx_handle_cmn

+ 20 - 6
core/hdd/src/wlan_hdd_tx_rx.c

@@ -1544,6 +1544,20 @@ static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
 	}
 }
 
+/**
+ * When bus bandwidth is idle and RX data is delivered with
+ * napi_gro_receive(), check the gro_result returned from
+ * napi_gro_receive() to determine whether an extra GRO flush
+ * is still necessary, to reduce GRO-related RX delay.
+ * On kernels >= 5.4, GRO_NORMAL packets are batched on
+ * napi->rx_list rather than delivered immediately, so a flush
+ * is still required for GRO_NORMAL results as well.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+#define HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) \
+	((_gro_ret) != GRO_DROP)
+#else
+#define HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(_gro_ret) \
+	((_gro_ret) != GRO_DROP && (_gro_ret) != GRO_NORMAL)
+#endif
+
 /**
  * hdd_gro_rx_bh_disable() - GRO RX/flush function.
  * @napi_to_use: napi to be used to give packets to the stack, gro flush
@@ -1563,23 +1577,23 @@ static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
 {
 	QDF_STATUS status = QDF_STATUS_SUCCESS;
 	struct hdd_context *hdd_ctx = adapter->hdd_ctx;
-	gro_result_t gro_res;
+	gro_result_t gro_ret;
 
 	skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
 
 	local_bh_disable();
-	gro_res = napi_gro_receive(napi_to_use, skb);
+	gro_ret = napi_gro_receive(napi_to_use, skb);
 
 	if (hdd_get_current_throughput_level(hdd_ctx) == PLD_BUS_WIDTH_IDLE) {
-		if (gro_res != GRO_DROP && gro_res != GRO_NORMAL) {
+		if (HDD_IS_EXTRA_GRO_FLUSH_NECESSARY(gro_ret)) {
 			adapter->hdd_stats.tx_rx_stats.
 					rx_gro_low_tput_flush++;
-			napi_gro_flush(napi_to_use, false);
+			dp_rx_napi_gro_flush(napi_to_use);
 		}
 	}
 	local_bh_enable();
 
-	if (gro_res == GRO_DROP)
+	if (gro_ret == GRO_DROP)
 		status = QDF_STATUS_E_GRO_DROP;
 
 	return status;
@@ -1682,7 +1696,7 @@ static void hdd_rxthread_napi_gro_flush(void *data)
 	 * As we are breaking context in Rxthread mode, there is rx_thread NAPI
 	 * corresponds each hif_napi.
 	 */
-	napi_gro_flush(&qca_napii->rx_thread_napi, false);
+	dp_rx_napi_gro_flush(&qca_napii->rx_thread_napi);
 	local_bh_enable();
 }