
qcacld-3.0: Add support for GRO flush indications

Add support for sending a GRO flush indication to the DP RX thread. On
receiving the indication, the RX thread drains its nbuf queue and then
flushes the pending GRO packets on its NAPI instance.

CRs-Fixed: 2495719
Change-Id: I8723dc696d8af705067a50dcf5f161ef4f28d485
Author: Mohit Khanna, 5 years ago
Commit: f0620ce6ca
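
At a high level, the change plumbs a flush indication from the upper layer down
to the per-ring DP RX thread: HDD registers an rx_gro_flush callback alongside
the existing rx/rx_stack ops; dp_rx_gro_flush_ind() forwards the indication to
dp_rx_tm_gro_flush_ind(), which maps the RX context id to a thread;
dp_rx_tm_thread_gro_flush_ind() sets the thread's atomic gro_flush_ind flag and
wakes it; the thread's sub-loop then drains its nbuf queue, calls
napi_gro_flush() under local_bh_disable()/local_bh_enable(), and clears the
flag. The following is a minimal userspace model of that flag-and-wake pattern,
assuming C11 atomics and a pthread condition variable as stand-ins for
qdf_atomic_t and the QDF wait queue (all names here are illustrative, not the
driver's):

/* Userspace model of the GRO-flush indication: an atomic flag plus a
 * wakeup, mirroring dp_rx_tm_thread_gro_flush_ind() and the check in
 * dp_rx_thread_sub_loop(). napi_gro_flush() is stubbed out. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER; /* ~ QDF wait queue */
static atomic_int gro_flush_ind;        /* ~ rx_thread->gro_flush_ind */
static atomic_bool post_event;          /* ~ RX_POST_EVENT bit */
static atomic_bool shutdown_req;        /* ~ RX_SHUTDOWN_EVENT bit */
static unsigned int gro_flushes;        /* ~ rx_thread->stats.gro_flushes */

static void rx_thread_gro_flush(void)
{
	/* In the driver this is napi_gro_flush() bracketed by
	 * local_bh_disable()/local_bh_enable(). */
	gro_flushes++;
	printf("GRO flushed (total flushes: %u)\n", gro_flushes);
}

/* ~ dp_rx_tm_thread_gro_flush_ind(): set the flag, then wake the thread */
static void rx_tm_thread_gro_flush_ind(void)
{
	atomic_store(&gro_flush_ind, 1);
	pthread_mutex_lock(&lock);
	atomic_store(&post_event, true);
	pthread_cond_signal(&wait_q);
	pthread_mutex_unlock(&lock);
}

/* ~ dp_rx_thread_sub_loop(): wait for an event, drain the queue, then
 * honour any pending flush indication and clear it */
static void *rx_thread_loop(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!atomic_load(&post_event) && !atomic_load(&shutdown_req))
			pthread_cond_wait(&wait_q, &lock);
		atomic_store(&post_event, false);
		pthread_mutex_unlock(&lock);

		if (atomic_load(&shutdown_req))
			break;

		/* ... dp_rx_thread_process_nbufq() would run here ... */

		if (atomic_load(&gro_flush_ind)) {
			rx_thread_gro_flush();
			atomic_store(&gro_flush_ind, 0);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t thread;

	pthread_create(&thread, NULL, rx_thread_loop, NULL);
	rx_tm_thread_gro_flush_ind();   /* ~ dp_rx_gro_flush_ind(soc, ctx_id) */
	usleep(100 * 1000);             /* let the thread observe the flag */

	atomic_store(&shutdown_req, true);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&wait_q);
	pthread_mutex_unlock(&lock);
	pthread_join(thread, NULL);
	return 0;
}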

+ 75 - 7
core/dp/txrx3.0/dp_rx_thread.c

@@ -102,13 +102,14 @@ static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
 	if (!total_queued)
 		return;
 
-	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u)",
+	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u gro_flushes:%u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u)",
 		rx_thread->id,
 		qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
 		total_queued,
 		nbuf_queued_string,
 		rx_thread->stats.nbuf_dequeued,
 		rx_thread->stats.nbuf_sent_to_stack,
+		rx_thread->stats.gro_flushes,
 		rx_thread->stats.nbufq_max_len,
 		rx_thread->stats.dropped_invalid_peer,
 		rx_thread->stats.dropped_invalid_vdev,
@@ -209,12 +210,33 @@ enq_done:
 	if (temp_qlen > rx_thread->stats.nbufq_max_len)
 		rx_thread->stats.nbufq_max_len = temp_qlen;
 
+	dp_debug("enqueue packet thread %pK wait queue %pK qlen %u",
+		 rx_thread, wait_q_ptr,
+		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));
+
 	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
 	qdf_wake_up_interruptible(wait_q_ptr);
 
 	return QDF_STATUS_SUCCESS;
 }
 
+static QDF_STATUS dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread)
+{
+	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
+	qdf_wait_queue_head_t *wait_q_ptr;
+
+	tm_handle_cmn = rx_thread->rtm_handle_cmn;
+	wait_q_ptr = dp_rx_thread_get_wait_queue(tm_handle_cmn);
+
+	qdf_atomic_set(&rx_thread->gro_flush_ind, 1);
+
+	dp_debug("Flush indication received");
+
+	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
+	qdf_wake_up_interruptible(wait_q_ptr);
+	return QDF_STATUS_SUCCESS;
+}
+
 /**
  * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
  * @rx_thread - rx_thread from which the nbuf needs to be dequeued
@@ -234,6 +256,8 @@ static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
 		qdf_nbuf_set_next(nbuf_list, next_ptr_list);
 		dp_rx_tm_walk_skb_list(nbuf_list);
 	}
+
+	dp_debug("Dequeued %pK nbuf_list", nbuf_list);
 	return nbuf_list;
 }
 
@@ -269,6 +293,9 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 		return -EFAULT;
 	}
 
+	dp_debug("enter: qlen  %u",
+		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));
+
 	nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
 	while (nbuf_list) {
 		num_list_elements =
@@ -305,6 +332,8 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 			qdf_nbuf_list_free(nbuf_list);
 			goto dequeue_rx_thread;
 		}
+		dp_debug("rx_thread %pK sending packet %pK to stack", rx_thread,
+			 nbuf_list);
 		stack_fn(osif_vdev, nbuf_list);
 		rx_thread->stats.nbuf_sent_to_stack += num_list_elements;
 
@@ -312,9 +341,29 @@ dequeue_rx_thread:
 		nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
 	}
 
+	dp_debug("exit: qlen  %u",
+		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));
+
 	return 0;
 }
 
+/**
+ * dp_rx_thread_gro_flush() - flush GRO packets for the RX thread
+ * @rx_thread - rx_thread to be processed
+ *
+ * Return: void
+ */
+static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread)
+{
+	dp_debug("flushing packets for thread %u", rx_thread->id);
+
+	local_bh_disable();
+	napi_gro_flush(&rx_thread->napi, false);
+	local_bh_enable();
+
+	rx_thread->stats.gro_flushes++;
+}
+
 /**
  * dp_rx_thread_sub_loop() - rx thread subloop
  * @rx_thread - rx_thread to be processed
@@ -345,6 +394,11 @@ static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
 
 		dp_rx_thread_process_nbufq(rx_thread);
 
+		if (qdf_atomic_read(&rx_thread->gro_flush_ind)) {
+			dp_rx_thread_gro_flush(rx_thread);
+			qdf_atomic_set(&rx_thread->gro_flush_ind, 0);
+		}
+
 		if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
 						  &rx_thread->event_flag)) {
 			dp_debug("received suspend ind (%s) id %d pid %d",
@@ -484,6 +538,7 @@ static QDF_STATUS dp_rx_tm_thread_init(struct dp_rx_thread *rx_thread,
 	qdf_event_create(&rx_thread->suspend_event);
 	qdf_event_create(&rx_thread->resume_event);
 	qdf_event_create(&rx_thread->shutdown_event);
+	qdf_atomic_init(&rx_thread->gro_flush_ind);
 	qdf_scnprintf(thread_name, sizeof(thread_name), "dp_rx_thread_%u", id);
 	dp_info("%s %u", thread_name, id);
 
@@ -714,20 +769,19 @@ QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl)
  * dp_rx_tm_select_thread() - select a DP RX thread for a nbuf
  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
  *            infrastructure
- * @nbuf_list: list of nbufs to be enqueued in to the thread
+ * @reo_ring_num: REO ring number corresponding to the thread
  *
- * The function relies on the presence of QDF_NBUF_CB_RX_CTX_ID
- * in the nbuf list. Depending on the RX_CTX (copy engine or reo
+ * The function relies on the QDF_NBUF_CB_RX_CTX_ID value passed to it by
+ * the caller from the nbuf list. Depending on the RX_CTX (copy engine or reo
  * ring) on which the packet was received, the function selects
  * a corresponding rx_thread.
  *
  * Return: rx thread ID selected for the nbuf
  */
 static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
-				      qdf_nbuf_t nbuf_list)
+				      uint8_t reo_ring_num)
 {
 	uint8_t selected_rx_thread;
-	uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
 
 	if (reo_ring_num >= rx_tm_hdl->num_dp_rx_threads) {
 		dp_err_rl("unexpected ring number");
@@ -736,6 +790,7 @@ static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
 	}
 
 	selected_rx_thread = reo_ring_num;
+	dp_debug("selected thread %u", selected_rx_thread);
 	return selected_rx_thread;
 }
 
@@ -744,12 +799,25 @@ QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
 {
 	uint8_t selected_thread_id;
 
-	selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, nbuf_list);
+	selected_thread_id =
+		dp_rx_tm_select_thread(rx_tm_hdl,
+				       QDF_NBUF_CB_RX_CTX_ID(nbuf_list));
 	dp_rx_tm_thread_enqueue(rx_tm_hdl->rx_thread[selected_thread_id],
 				nbuf_list);
 	return QDF_STATUS_SUCCESS;
 }
 
+QDF_STATUS
+dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_hdl, int rx_ctx_id)
+{
+	uint8_t selected_thread_id;
+
+	selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, rx_ctx_id);
+	dp_rx_tm_thread_gro_flush_ind(rx_tm_hdl->rx_thread[selected_thread_id]);
+
+	return QDF_STATUS_SUCCESS;
+}
+
 struct napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
 					      uint8_t rx_ctx_id)
 {

+ 13 - 0
core/dp/txrx3.0/dp_rx_thread.h

@@ -50,6 +50,7 @@ struct dp_rx_tm_handle_cmn;
  * @nbuf_dequeued: packets de-queued from the thread
  * @nbuf_sent_to_stack: packets sent to the stack. some dequeued packets may be
  *			dropped due to no peer or vdev, hence this stat.
+ * @gro_flushes: number of GRO flushes performed for the thread
  * @nbufq_max_len: maximum number of nbuf_lists queued for the thread
  * @dropped_invalid_vdev: packets(nbuf_list) dropped due to no vdev
  * @dropped_invalid_peer: packets(nbuf_list) dropped due to no peer
@@ -60,6 +61,7 @@ struct dp_rx_thread_stats {
 	unsigned int nbuf_queued[DP_RX_TM_MAX_REO_RINGS];
 	unsigned int nbuf_dequeued;
 	unsigned int nbuf_sent_to_stack;
+	unsigned int gro_flushes;
 	unsigned int nbufq_max_len;
 	unsigned int dropped_invalid_vdev;
 	unsigned int dropped_invalid_peer;
@@ -92,6 +94,7 @@ struct dp_rx_thread {
 	qdf_event_t suspend_event;
 	qdf_event_t resume_event;
 	qdf_event_t shutdown_event;
+	qdf_atomic_t gro_flush_ind;
 	unsigned long event_flag;
 	qdf_nbuf_queue_head_t nbuf_queue;
 	unsigned long aff_mask;
@@ -160,6 +163,16 @@ QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl);
 QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
 				qdf_nbuf_t nbuf_list);
 
+/**
+ * dp_rx_tm_gro_flush_ind() - flush GRO packets for an RX Context Id
+ * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
+ * @rx_ctx_id: RX Thread Context Id for which GRO flush needs to be done
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+QDF_STATUS dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_hdl,
+				  int rx_ctx_id);
+
 /**
  * dp_rx_tm_suspend() - suspend all threads in RXTI
  * @rx_tm_handle: pointer to dp_rx_tm_handle object

+ 36 - 0
core/dp/txrx3.0/dp_txrx.h

@@ -198,6 +198,36 @@ ret:
 	return qdf_status;
 }
 
+/**
+ * dp_rx_gro_flush_ind() - Flush GRO packets for a given RX CTX Id
+ * @soc: ol_txrx_soc_handle object
+ * @rx_ctx_id: Context Id (Thread for which GRO packets need to be flushed)
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
+ */
+static inline
+QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id)
+{
+	struct dp_txrx_handle *dp_ext_hdl;
+	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
+
+	if (!soc) {
+		qdf_status = QDF_STATUS_E_INVAL;
+		dp_err("invalid input param soc %pK", soc);
+		goto ret;
+	}
+
+	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
+	if (!dp_ext_hdl) {
+		qdf_status = QDF_STATUS_E_FAULT;
+		goto ret;
+	}
+
+	qdf_status = dp_rx_tm_gro_flush_ind(&dp_ext_hdl->rx_tm_hdl, rx_ctx_id);
+ret:
+	return qdf_status;
+}
+
 /**
  * dp_txrx_ext_dump_stats() - dump txrx external module stats
  * @soc: ol_txrx_soc_handle object
@@ -317,6 +347,12 @@ QDF_STATUS dp_rx_enqueue_pkt(ol_txrx_soc_handle soc, qdf_nbuf_t nbuf_list)
 	return QDF_STATUS_SUCCESS;
 }
 
+static inline
+QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
 static inline QDF_STATUS dp_txrx_ext_dump_stats(ol_txrx_soc_handle soc,
 						uint8_t stats_id)
 {

+ 1 - 3
core/hdd/inc/wlan_hdd_main.h

@@ -446,13 +446,11 @@ struct hdd_tx_rx_stats {
 	__u32 rx_delivered[NUM_CPUS];
 	__u32 rx_refused[NUM_CPUS];
 	qdf_atomic_t rx_usolict_arp_n_mcast_drp;
+
 	/* rx gro */
 	__u32 rx_aggregated;
 	__u32 rx_gro_dropped;
 	__u32 rx_non_aggregated;
-	__u32 rx_gro_flushes;
-	/* Dynamic GRO disable/enable, flush may be required for UDP GRO */
-	__u32 rx_gro_force_flushes;
 
 	/* txflow stats */
 	bool     is_txflow_paused;

+ 14 - 0
core/hdd/inc/wlan_hdd_tx_rx.h

@@ -116,6 +116,19 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rxBuf);
 QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
 				   struct sk_buff *skb);
 
+/**
+ * hdd_rx_thread_gro_flush_ind_cbk() - receive handler to flush GRO packets
+ * @adapter: pointer to HDD adapter
+ * @rx_ctx_id: RX CTX Id for which flush should happen
+ *
+ * Receive callback registered with DP layer which flushes GRO packets
+ * for a given RX CTX ID (RX Thread)
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id);
+
 /**
  * hdd_rx_pkt_thread_enqueue_cbk() - receive pkt handler to enqueue into thread
  * @adapter: pointer to HDD adapter
@@ -123,6 +136,7 @@ QDF_STATUS hdd_rx_deliver_to_stack(struct hdd_adapter *adapter,
  *
  * Receive callback registered with DP layer which enqueues packets into dp rx
  * thread
+ *
  * Return: QDF_STATUS_E_FAILURE if any errors encountered,
  *	   QDF_STATUS_SUCCESS otherwise
  */

+ 2 - 0
core/hdd/src/wlan_hdd_assoc.c

@@ -2135,6 +2135,7 @@ QDF_STATUS hdd_roam_register_sta(struct hdd_adapter *adapter,
 	if (adapter->hdd_ctx->enable_dp_rx_threads) {
 		txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
 		txrx_ops.rx.rx_stack = hdd_rx_packet_cbk;
+		txrx_ops.rx.rx_gro_flush = hdd_rx_thread_gro_flush_ind_cbk;
 	} else {
 		txrx_ops.rx.rx = hdd_rx_packet_cbk;
 		txrx_ops.rx.rx_stack = NULL;
@@ -4085,6 +4086,7 @@ QDF_STATUS hdd_roam_register_tdlssta(struct hdd_adapter *adapter,
 	if (adapter->hdd_ctx->enable_dp_rx_threads) {
 		txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
 		txrx_ops.rx.rx_stack = hdd_rx_packet_cbk;
+		txrx_ops.rx.rx_gro_flush = hdd_rx_thread_gro_flush_ind_cbk;
 	} else {
 		txrx_ops.rx.rx = hdd_rx_packet_cbk;
 		txrx_ops.rx.rx_stack = NULL;
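
The registration above follows the driver's rx ops-table pattern: when DP RX
threads are enabled, the per-vdev ops carry the new rx_gro_flush hook next to
rx (enqueue into the RX thread) and rx_stack (direct delivery). A small
self-contained sketch of that dispatch shape, with stand-in types rather than
the driver's cdp/ol_txrx definitions (every name here is illustrative, modeled
on hdd_roam_register_sta()):

/* Stand-in model of the rx ops registration; not the driver's types. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rx_ops {
	int (*rx)(void *ctx, void *nbuf_list);         /* enqueue to RX thread */
	int (*rx_stack)(void *ctx, void *nbuf_list);   /* direct-to-stack path */
	int (*rx_gro_flush)(void *ctx, int rx_ctx_id); /* new flush hook */
};

static int enqueue_cbk(void *ctx, void *nbufs)
{
	(void)ctx; (void)nbufs;
	puts("enqueue into DP RX thread");
	return 0;
}

static int stack_cbk(void *ctx, void *nbufs)
{
	(void)ctx; (void)nbufs;
	puts("deliver to stack");
	return 0;
}

static int gro_flush_cbk(void *ctx, int rx_ctx_id)
{
	(void)ctx;
	printf("flush GRO for rx_ctx %d\n", rx_ctx_id);
	return 0;
}

int main(void)
{
	bool enable_dp_rx_threads = true;
	struct rx_ops ops = { NULL, NULL, NULL };

	if (enable_dp_rx_threads) {     /* mirrors the branch above */
		ops.rx = enqueue_cbk;
		ops.rx_stack = stack_cbk;
		ops.rx_gro_flush = gro_flush_cbk;
	} else {
		ops.rx = stack_cbk;
	}

	/* later, the datapath invokes the hook for a given RX context */
	if (ops.rx_gro_flush)
		ops.rx_gro_flush(NULL, 0);
	return 0;
}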

+ 1 - 0
core/hdd/src/wlan_hdd_softap_tx_rx.c

@@ -1117,6 +1117,7 @@ QDF_STATUS hdd_softap_register_sta(struct hdd_adapter *adapter,
 	if (adapter->hdd_ctx->enable_dp_rx_threads) {
 		txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
 		txrx_ops.rx.rx_stack = hdd_softap_rx_packet_cbk;
+		txrx_ops.rx.rx_gro_flush = hdd_rx_thread_gro_flush_ind_cbk;
 	} else {
 		txrx_ops.rx.rx = hdd_softap_rx_packet_cbk;
 		txrx_ops.rx.rx_stack = NULL;

+ 5 - 2
core/hdd/src/wlan_hdd_stats.c

@@ -5795,6 +5795,10 @@ void wlan_hdd_display_txrx_stats(struct hdd_context *ctx)
 		total_rx_delv = 0;
 		total_rx_refused = 0;
 		stats = &adapter->hdd_stats.tx_rx_stats;
+
+		if (adapter->vdev_id == INVAL_VDEV_ID)
+			continue;
+
 		hdd_debug("adapter: %u", adapter->vdev_id);
 		for (; i < NUM_CPUS; i++) {
 			total_rx_pkt += stats->rx_packets[i];
@@ -5814,13 +5818,12 @@ void wlan_hdd_display_txrx_stats(struct hdd_context *ctx)
 				  i, stats->rx_packets[i], stats->rx_dropped[i],
 				  stats->rx_delivered[i], stats->rx_refused[i]);
 		}
-		hdd_debug("RX - packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u GRO - agg %u drop %u non-agg %u flushes(%u %u) disabled(conc %u low-tput %u)",
+		hdd_debug("RX - packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u GRO - agg %u drop %u non-agg %u disabled(conc %u low-tput %u)",
 			  total_rx_pkt, total_rx_dropped,
 			  qdf_atomic_read(&stats->rx_usolict_arp_n_mcast_drp),
 			  total_rx_delv,
 			  total_rx_refused, stats->rx_aggregated,
 			  stats->rx_gro_dropped, stats->rx_non_aggregated,
-			  stats->rx_gro_flushes, stats->rx_gro_force_flushes,
 			  qdf_atomic_read(&ctx->disable_rx_ol_in_concurrency),
 			  qdf_atomic_read(&ctx->disable_rx_ol_in_low_tput));
 	}

+ 11 - 7
core/hdd/src/wlan_hdd_tx_rx.c

@@ -1554,22 +1554,16 @@ static QDF_STATUS hdd_gro_rx_bh_disable(struct hdd_adapter *adapter,
 {
 	QDF_STATUS status = QDF_STATUS_SUCCESS;
 	gro_result_t gro_res;
-	bool flush_ind = QDF_NBUF_CB_RX_FLUSH_IND(skb);
 
 	skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
 
 	local_bh_disable();
 	gro_res = napi_gro_receive(napi_to_use, skb);
-	if (flush_ind)
-		napi_gro_flush(napi_to_use, false);
 	local_bh_enable();
 
 	if (gro_res == GRO_DROP)
 		status = QDF_STATUS_E_GRO_DROP;
 
-	if (flush_ind)
-		adapter->hdd_stats.tx_rx_stats.rx_gro_flushes++;
-
 	return status;
 }
 
@@ -1897,10 +1891,20 @@ static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
 }
 #endif
 
+QDF_STATUS hdd_rx_thread_gro_flush_ind_cbk(void *adapter, int rx_ctx_id)
+{
+	if (qdf_unlikely(!adapter)) {
+		hdd_err("Null params being passed");
+		return QDF_STATUS_E_FAILURE;
+	}
+	return dp_rx_gro_flush_ind(cds_get_context(QDF_MODULE_ID_SOC),
+				   rx_ctx_id);
+}
+
 QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter,
 					 qdf_nbuf_t nbuf_list)
 {
-	if (unlikely((!adapter) || (!nbuf_list))) {
+	if (qdf_unlikely(!adapter || !nbuf_list)) {
 		hdd_err("Null params being passed");
 		return QDF_STATUS_E_FAILURE;
 	}

+ 1 - 3
core/hdd/src/wlan_hdd_wext.c

@@ -3115,7 +3115,7 @@ void hdd_wlan_get_stats(struct hdd_adapter *adapter, uint16_t *length,
 			"\n[classified] BK %u, BE %u, VI %u, VO %u"
 			"\n\nReceive[%lu] - "
 			"packets %u, dropped %u, unsolict_arp_n_mcast_drp %u, delivered %u, refused %u\n"
-			"GRO - agg %u non-agg %u flushes(%u %u) disabled(conc %u low-tput %u)\n",
+			"GRO - agg %u non-agg %u disabled(conc %u low-tput %u)\n",
 			qdf_system_ticks(),
 			stats->tx_called,
 			stats->tx_dropped,
@@ -3134,8 +3134,6 @@ void hdd_wlan_get_stats(struct hdd_adapter *adapter, uint16_t *length,
 			total_rx_delv,
 			total_rx_refused,
 			stats->rx_aggregated, stats->rx_non_aggregated,
-			stats->rx_gro_flushes,
-			stats->rx_gro_force_flushes,
 			qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_concurrency),
 			qdf_atomic_read(&hdd_ctx->disable_rx_ol_in_low_tput));