Prechádzať zdrojové kódy

qcacld-3.0: Add support to flush rx packets for a vdev

When a particular vdev is deleted, the corresponding rx
packets which have been queued to the rx thread are not
flushed. Hence when such packets are submitted to the
network stack, the dev for this skb will be invalid,
since we have already freed the adapter.

Flush out the packets in the rx thread queues, before
deleting the vdev.

CRs-Fixed: 2552140
Change-Id: Ia49af2c203c64077f7fd87524bb4caa4060e0044
Rakesh Pillai 5 rokov pred
rodič
commit
246f1dfdd2

+ 119 - 18
core/dp/txrx3.0/dp_rx_thread.c

@@ -102,7 +102,7 @@ static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
 	if (!total_queued)
 		return;
 
-	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u gro_flushes: %u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u)",
+	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u gro_flushes: %u rx_flushes: %u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u)",
 		rx_thread->id,
 		qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
 		total_queued,
@@ -110,6 +110,7 @@ static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
 		rx_thread->stats.nbuf_dequeued,
 		rx_thread->stats.nbuf_sent_to_stack,
 		rx_thread->stats.gro_flushes,
+		rx_thread->stats.rx_flushed,
 		rx_thread->stats.nbufq_max_len,
 		rx_thread->stats.dropped_invalid_peer,
 		rx_thread->stats.dropped_invalid_vdev,
@@ -238,16 +239,15 @@ static QDF_STATUS dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread)
 }
 
 /**
- * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
- * @rx_thread - rx_thread from which the nbuf needs to be dequeued
+ * dp_rx_thread_adjust_nbuf_list() - linearize an nbuf's frag list by
+ *				     moving it onto the ->next chain
+ * @head: head nbuf of a dequeued list; NULL or a single-element list
+ *	  is left untouched
  *
- * Returns: nbuf or nbuf_list dequeued from rx_thread
+ * Returns: void
  */
-static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
+static void dp_rx_thread_adjust_nbuf_list(qdf_nbuf_t head)
 {
-	qdf_nbuf_t head, next_ptr_list, nbuf_list;
+	qdf_nbuf_t next_ptr_list, nbuf_list;
 
-	head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
 	nbuf_list = head;
 	if (head && QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) > 1) {
 		/* move ext list to ->next pointer */
@@ -256,9 +256,58 @@ static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
 		qdf_nbuf_set_next(nbuf_list, next_ptr_list);
 		dp_rx_tm_walk_skb_list(nbuf_list);
 	}
+}
+
+/**
+ * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
+ * @rx_thread: rx_thread from which the nbuf needs to be dequeued
+ *
+ * Any frag list carried by the dequeued nbuf is linearized onto its
+ * ->next chain before it is returned to the caller.
+ *
+ * Returns: nbuf or nbuf_list dequeued from rx_thread
+ */
+static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
+{
+	qdf_nbuf_t head;
+
+	head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
+	dp_rx_thread_adjust_nbuf_list(head);
 
-	dp_debug("Dequeued %pK nbuf_list", nbuf_list);
-	return nbuf_list;
+	dp_debug("Dequeued %pK nbuf_list", head);
+	return head;
+}
+
+/**
+ * dp_rx_thread_get_nbuf_vdev_handle() - get vdev handle from nbuf
+ *			                 dequeued from rx thread
+ * @soc: soc handle
+ * @pdev: pdev handle
+ * @rx_thread: rx_thread whose nbuf was dequeued
+ * @nbuf_list: nbuf list dequeued from rx_thread
+ *
+ * The vdev is looked up from the vdev_id stored in the nbuf cb. On a
+ * failed lookup every element of @nbuf_list is accounted in the
+ * thread's dropped_invalid_vdev stat; the caller still owns the list
+ * and is responsible for freeing it.
+ *
+ * Returns: vdev handle on Success, NULL on failure
+ */
+static struct cdp_vdev *
+dp_rx_thread_get_nbuf_vdev_handle(ol_txrx_soc_handle soc,
+				  struct cdp_pdev *pdev,
+				  struct dp_rx_thread *rx_thread,
+				  qdf_nbuf_t nbuf_list)
+{
+	uint32_t num_list_elements = 0;
+	struct cdp_vdev *vdev;
+	uint8_t vdev_id;
+
+	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf_list);
+	vdev = cdp_get_vdev_from_vdev_id(soc, pdev, vdev_id);
+	if (!vdev) {
+		num_list_elements =
+			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
+		rx_thread->stats.dropped_invalid_vdev +=
+						num_list_elements;
+		dp_err("vdev not found for vdev_id %u!, pkt dropped",
+		       vdev_id);
+		return NULL;
+	}
+
+	return vdev;
 }
 
 /**
@@ -271,7 +320,6 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 {
 	qdf_nbuf_t nbuf_list;
 	struct cdp_vdev *vdev;
-	uint8_t vdev_id;
 	ol_txrx_rx_fp stack_fn;
 	ol_osif_vdev_handle osif_vdev;
 	ol_txrx_soc_handle soc;
@@ -301,18 +349,12 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
 		rx_thread->stats.nbuf_dequeued += num_list_elements;
 
-		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf_list);
-
-		vdev = cdp_get_vdev_from_vdev_id(soc, pdev, vdev_id);
+		vdev = dp_rx_thread_get_nbuf_vdev_handle(soc, pdev, rx_thread,
+							 nbuf_list);
 		if (!vdev) {
-			rx_thread->stats.dropped_invalid_vdev +=
-							num_list_elements;
-			dp_err("vdev not found for vdev_id %u!, pkt dropped",
-			       vdev_id);
 			qdf_nbuf_list_free(nbuf_list);
 			goto dequeue_rx_thread;
 		}
-
 		cdp_get_os_rx_handles_from_vdev(soc, vdev, &stack_fn,
 						&osif_vdev);
 		if (!stack_fn || !osif_vdev) {
@@ -672,6 +714,65 @@ QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_hdl)
 	return QDF_STATUS_SUCCESS;
 }
 
+/**
+ * dp_rx_thread_flush_by_vdev_id() - flush rx packets by vdev_id in
+ *				     a particular rx thread queue
+ * @rx_thread: rx_thread pointer of the queue from which packets are
+ *             to be flushed out
+ * @vdev_id: vdev id for which packets are to be flushed
+ *
+ * Walks the thread's nbuf queue under the queue lock, unlinks and
+ * frees every nbuf list whose cb vdev_id matches @vdev_id, and
+ * accounts the freed elements in the rx_flushed stat.
+ *
+ * Return: void
+ */
+static inline
+void dp_rx_thread_flush_by_vdev_id(struct dp_rx_thread *rx_thread,
+				   uint8_t vdev_id)
+{
+	qdf_nbuf_t nbuf_list, tmp_nbuf_list;
+	uint32_t num_list_elements = 0;
+
+	qdf_nbuf_queue_head_lock(&rx_thread->nbuf_queue);
+	QDF_NBUF_QUEUE_WALK_SAFE(&rx_thread->nbuf_queue, nbuf_list,
+				 tmp_nbuf_list) {
+		if (QDF_NBUF_CB_RX_VDEV_ID(nbuf_list) == vdev_id) {
+			qdf_nbuf_unlink_no_lock(nbuf_list,
+						&rx_thread->nbuf_queue);
+			dp_rx_thread_adjust_nbuf_list(nbuf_list);
+			num_list_elements =
+				QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
+			rx_thread->stats.rx_flushed += num_list_elements;
+			qdf_nbuf_list_free(nbuf_list);
+		}
+	}
+	qdf_nbuf_queue_head_unlock(&rx_thread->nbuf_queue);
+}
+
+/**
+ * dp_rx_tm_flush_by_vdev_id() - flush rx packets by vdev_id in all
+ *				 rx thread queues
+ * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
+ *             infrastructure
+ * @vdev_id: vdev id for which packets are to be flushed
+ *
+ * Unallocated thread slots in the handle are skipped silently.
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+QDF_STATUS dp_rx_tm_flush_by_vdev_id(struct dp_rx_tm_handle *rx_tm_hdl,
+				     uint8_t vdev_id)
+{
+	struct dp_rx_thread *rx_thread;
+	int i;
+
+	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
+		rx_thread = rx_tm_hdl->rx_thread[i];
+		if (!rx_thread)
+			continue;
+
+		dp_debug("thread %d", i);
+		dp_rx_thread_flush_by_vdev_id(rx_thread, vdev_id);
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
 /**
  * dp_rx_tm_resume() - resume DP RX threads
  * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread

+ 14 - 0
core/dp/txrx3.0/dp_rx_thread.h

@@ -46,6 +46,7 @@ struct dp_rx_tm_handle_cmn;
  * @gro_flushes: number of GRO flushes
  * @nbufq_max_len: maximum number of nbuf_lists queued for the thread
  * @dropped_invalid_vdev: packets(nbuf_list) dropped due to no vdev
+ * @rx_flushed: packets flushed after vdev delete
  * @dropped_invalid_peer: packets(nbuf_list) dropped due to no peer
  * @dropped_others: packets dropped due to other reasons
 
@@ -57,6 +58,7 @@ struct dp_rx_thread_stats {
 	unsigned int gro_flushes;
 	unsigned int nbufq_max_len;
 	unsigned int dropped_invalid_vdev;
+	unsigned int rx_flushed;
 	unsigned int dropped_invalid_peer;
 	unsigned int dropped_invalid_os_rx_handles;
 	unsigned int dropped_others;
@@ -171,6 +173,18 @@ QDF_STATUS dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_handle,
  */
 QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_handle);
 
+/**
+ * dp_rx_tm_flush_by_vdev_id() - flush rx packets by vdev_id in all
+ *				 rx thread queues
+ * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
+ *             infrastructure
+ * @vdev_id: vdev id for which packets are to be flushed
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+QDF_STATUS dp_rx_tm_flush_by_vdev_id(struct dp_rx_tm_handle *rx_tm_hdl,
+				     uint8_t vdev_id);
+
 /**
  * dp_rx_tm_resume() - resume all threads in RXTI
  * @rx_tm_handle: pointer to dp_rx_tm_handle object

+ 35 - 0
core/dp/txrx3.0/dp_txrx.h

@@ -110,6 +110,35 @@ QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, uint8_t pdev_id,
  */
 QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc);
 
+/**
+ * dp_txrx_flush_pkts_by_vdev_id() - flush rx packets for a vdev_id
+ * @soc: ol_txrx_soc_handle object
+ * @vdev_id: vdev_id for which rx packets are to be flushed
+ *
+ * Return: QDF_STATUS_SUCCESS on success,
+ *	   QDF_STATUS_E_INVAL if @soc is NULL,
+ *	   QDF_STATUS_E_FAULT if no dp_txrx handle is registered on @soc
+ */
+static inline QDF_STATUS dp_txrx_flush_pkts_by_vdev_id(ol_txrx_soc_handle soc,
+						       uint8_t vdev_id)
+{
+	struct dp_txrx_handle *dp_ext_hdl;
+	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
+
+	if (!soc) {
+		qdf_status = QDF_STATUS_E_INVAL;
+		goto ret;
+	}
+
+	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
+	if (!dp_ext_hdl) {
+		qdf_status = QDF_STATUS_E_FAULT;
+		goto ret;
+	}
+
+	qdf_status = dp_rx_tm_flush_by_vdev_id(&dp_ext_hdl->rx_tm_hdl, vdev_id);
+ret:
+	return qdf_status;
+}
+
 /**
  * dp_txrx_resume() - resume all threads
  * @soc: ol_txrx_soc_handle object
@@ -332,6 +361,12 @@ static inline QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc)
 	return QDF_STATUS_SUCCESS;
 }
 
+/*
+ * NOTE(review): no-op stub of dp_txrx_flush_pkts_by_vdev_id() -
+ * appears to be the variant built when DP RX threads are disabled
+ * (the guarding #ifdef is outside this hunk - confirm).
+ */
+static inline QDF_STATUS dp_txrx_flush_pkts_by_vdev_id(ol_txrx_soc_handle soc,
+						       uint8_t vdev_id)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
 static inline QDF_STATUS dp_txrx_resume(ol_txrx_soc_handle soc)
 {
 	return QDF_STATUS_SUCCESS;

+ 13 - 0
core/hdd/inc/wlan_hdd_tx_rx.h

@@ -88,6 +88,19 @@ void hdd_tx_timeout(struct net_device *dev);
 QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter);
 QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter);
 
+/**
+ * hdd_rx_flush_packet_cbk() - flush rx packet handler
+ * @adapter_context: pointer to HDD adapter context
+ * @vdev_id: vdev_id of the packets to be flushed
+ *
+ * Flush rx packet callback registered with data path. DP will call this to
+ * notify HDD when packets for a particular vdev are to be flushed out.
+ *
+ * Return: QDF_STATUS_E_FAILURE if any errors encountered,
+ *	   QDF_STATUS_SUCCESS otherwise
+ */
+QDF_STATUS hdd_rx_flush_packet_cbk(void *adapter_context, uint8_t vdev_id);
+
 /**
  * hdd_rx_packet_cbk() - Receive packet handler
  * @adapter_context: pointer to HDD adapter context

+ 4 - 0
core/hdd/src/wlan_hdd_assoc.c

@@ -2116,10 +2116,12 @@ QDF_STATUS hdd_roam_register_sta(struct hdd_adapter *adapter,
 	if (adapter->hdd_ctx->enable_dp_rx_threads) {
 		txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
 		txrx_ops.rx.rx_stack = hdd_rx_packet_cbk;
+		txrx_ops.rx.rx_flush = hdd_rx_flush_packet_cbk;
 		txrx_ops.rx.rx_gro_flush = hdd_rx_thread_gro_flush_ind_cbk;
 	} else {
 		txrx_ops.rx.rx = hdd_rx_packet_cbk;
 		txrx_ops.rx.rx_stack = NULL;
+		txrx_ops.rx.rx_flush = NULL;
 	}
 
 	txrx_ops.rx.stats_rx = hdd_tx_rx_collect_connectivity_stats_info;
@@ -4059,10 +4061,12 @@ QDF_STATUS hdd_roam_register_tdlssta(struct hdd_adapter *adapter,
 	if (adapter->hdd_ctx->enable_dp_rx_threads) {
 		txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
 		txrx_ops.rx.rx_stack = hdd_rx_packet_cbk;
+		txrx_ops.rx.rx_flush = hdd_rx_flush_packet_cbk;
 		txrx_ops.rx.rx_gro_flush = hdd_rx_thread_gro_flush_ind_cbk;
 	} else {
 		txrx_ops.rx.rx = hdd_rx_packet_cbk;
 		txrx_ops.rx.rx_stack = NULL;
+		txrx_ops.rx.rx_flush = NULL;
 	}
 	txrx_vdev = cdp_get_vdev_from_vdev_id(soc,
 					      (struct cdp_pdev *)pdev,

+ 2 - 0
core/hdd/src/wlan_hdd_softap_tx_rx.c

@@ -1069,10 +1069,12 @@ QDF_STATUS hdd_softap_register_sta(struct hdd_adapter *adapter,
 	if (adapter->hdd_ctx->enable_dp_rx_threads) {
 		txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
 		txrx_ops.rx.rx_stack = hdd_softap_rx_packet_cbk;
+		txrx_ops.rx.rx_flush = hdd_rx_flush_packet_cbk;
 		txrx_ops.rx.rx_gro_flush = hdd_rx_thread_gro_flush_ind_cbk;
 	} else {
 		txrx_ops.rx.rx = hdd_softap_rx_packet_cbk;
 		txrx_ops.rx.rx_stack = NULL;
+		txrx_ops.rx.rx_flush = NULL;
 	}
 
 	txrx_vdev = cdp_get_vdev_from_vdev_id(soc,

+ 34 - 0
core/hdd/src/wlan_hdd_tx_rx.c

@@ -2015,6 +2015,40 @@ static bool hdd_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
 }
 #endif
 
+/* Doc comment for this callback lives in wlan_hdd_tx_rx.h */
+QDF_STATUS hdd_rx_flush_packet_cbk(void *adapter_context, uint8_t vdev_id)
+{
+	struct hdd_adapter *adapter = NULL;
+	struct hdd_context *hdd_ctx = NULL;
+	/* soc may be NULL here; dp_txrx_flush_pkts_by_vdev_id() checks it */
+	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);
+
+	/* Sanity check on inputs */
+	if (unlikely(!adapter_context)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	adapter = (struct hdd_adapter *)adapter_context;
+	if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "Magic cookie(%x) for adapter sanity verification is invalid",
+			  adapter->magic);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	hdd_ctx = WLAN_HDD_GET_CTX(adapter);
+	if (unlikely(!hdd_ctx)) {
+		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HDD context is Null", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* packets are only queued to rx threads when the feature is enabled */
+	if (hdd_ctx->enable_dp_rx_threads)
+		dp_txrx_flush_pkts_by_vdev_id(soc, vdev_id);
+
+	return QDF_STATUS_SUCCESS;
+}
+
 QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
 			     qdf_nbuf_t rxBuf)
 {