
qcacld-3.0: Stop netdev queues when limits are reached

1) Stop the non-priority netdev queues when the tx queue stop threshold is reached.
2) Stop the priority queue when tx_desc_limit is reached.

Change-Id: Idcacc19b6d47dd665bb54f210d6da292cbe5af8d
CRs-fixed: 2236321
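
The two conditions above form a per-vdev, two-threshold scheme: ordinary netdev queues are paused once the in-flight descriptor count reaches queue_stop_th, and the high-priority queue (DHCP, EAPOL, ARP, ICMPv6 NS/NA) is paused only when the hard tx_desc_limit is hit. A minimal, self-contained sketch of that decision follows; classify_pause and the enum are illustrative names, not driver APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative action codes; the driver signals the same two cases through
 * pdev->pause_cb with WLAN_STOP_NON_PRIORITY_QUEUE and
 * WLAN_NETIF_PRIORITY_QUEUE_OFF.
 */
enum pause_action {
	PAUSE_NONE,
	PAUSE_NON_PRIORITY_QUEUES,	/* count reached queue_stop_th */
	PAUSE_PRIORITY_QUEUE,		/* count reached tx_desc_limit */
};

/* Mirrors the checks added to ol_tx_hl_desc_alloc() in the diff below;
 * a tx_desc_limit of 0 means flow control is disabled.
 */
static enum pause_action
classify_pause(int desc_count, int queue_stop_th, int tx_desc_limit,
	       bool high_prio_frame, bool non_prio_paused, bool prio_paused)
{
	if (tx_desc_limit == 0)
		return PAUSE_NONE;
	if (!non_prio_paused && desc_count >= queue_stop_th)
		return PAUSE_NON_PRIORITY_QUEUES;
	if (high_prio_frame && !prio_paused && desc_count == tx_desc_limit)
		return PAUSE_PRIORITY_QUEUE;
	return PAUSE_NONE;
}

int main(void)
{
	/* 60 descriptors in flight hits the stop threshold (prints 1) ... */
	printf("%d\n", classify_pause(60, 60, 80, false, false, false));
	/* ... and a high-priority frame at the hard limit pauses even the
	 * priority queue (prints 2).
	 */
	printf("%d\n", classify_pause(80, 60, 80, true, true, false));
	return 0;
}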
Ajit Pal Singh 7 years ago
Parent
Commit
5bcf68a52a
5 changed files with 98 additions and 15 deletions
  1. core/dp/txrx/ol_tx.c (+75, -5)
  2. core/dp/txrx/ol_tx.h (+4, -0)
  3. core/dp/txrx/ol_tx_desc.c (+2, -2)
  4. core/dp/txrx/ol_txrx.c (+11, -6)
  5. core/dp/txrx/ol_txrx_types.h (+6, -2)

+ 75 - 5
core/dp/txrx/ol_tx.c

@@ -553,20 +553,90 @@ struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
 	}
 	return tx_desc;
 }
-#else
+#elif defined(CONFIG_HL_SUPPORT) && defined(QCA_HL_NETDEV_FLOW_CONTROL)
+bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu)
+{
+	enum qdf_proto_subtype proto_subtype;
+	bool high_prio = false;
+
+	if (qdf_nbuf_is_ipv4_pkt(msdu)) {
+		if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+			QDF_NBUF_CB_PACKET_TYPE_DHCP) ||
+		    (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+			QDF_NBUF_CB_PACKET_TYPE_EAPOL))
+			high_prio = true;
+	} else if (QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+			QDF_NBUF_CB_PACKET_TYPE_ARP) {
+		high_prio = true;
+	} else if ((QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
+		   QDF_NBUF_CB_PACKET_TYPE_ICMPv6)) {
+		proto_subtype = qdf_nbuf_get_icmpv6_subtype(msdu);
+		switch (proto_subtype) {
+		case QDF_PROTO_ICMPV6_NA:
+		case QDF_PROTO_ICMPV6_NS:
+			high_prio = true;
+			break;
+		default:
+			high_prio = false;
+		}
+	}
+	return high_prio;
+}
 
 static inline
 struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
-	struct ol_txrx_vdev_t *vdev,
-	qdf_nbuf_t msdu,
-	struct ol_txrx_msdu_info_t *msdu_info)
+					 struct ol_txrx_vdev_t *vdev,
+					 qdf_nbuf_t msdu,
+					 struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_tx_desc_t *tx_desc =
+		ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
+
+	if (!tx_desc)
+		return NULL;
+
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	/* return if TX flow control disabled */
+	if (vdev->tx_desc_limit == 0) {
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+		return tx_desc;
+	}
+
+	if (!qdf_atomic_read(&vdev->os_q_paused) &&
+	    (qdf_atomic_read(&vdev->tx_desc_count) >= vdev->queue_stop_th)) {
+		/*
+		 * Pause normal-priority netdev queues once the tx desc
+		 * count crosses the stop threshold.
+		 */
+		pdev->pause_cb(vdev->vdev_id,
+			       WLAN_STOP_NON_PRIORITY_QUEUE,
+			       WLAN_DATA_FLOW_CONTROL);
+		qdf_atomic_set(&vdev->os_q_paused, 1);
+	} else if (ol_tx_desc_is_high_prio(msdu) && !vdev->prio_q_paused &&
+		   (qdf_atomic_read(&vdev->tx_desc_count)
+		    == vdev->tx_desc_limit)) {
+		/* Pause high priority queue */
+		pdev->pause_cb(vdev->vdev_id,
+			       WLAN_NETIF_PRIORITY_QUEUE_OFF,
+			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
+		vdev->prio_q_paused = 1;
+	}
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	return tx_desc;
+}
+#else
+static inline
+struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
+					 struct ol_txrx_vdev_t *vdev,
+					 qdf_nbuf_t msdu,
+					 struct ol_txrx_msdu_info_t *msdu_info)
 {
 	struct ol_tx_desc_t *tx_desc = NULL;
 
 	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
 	return tx_desc;
 }
-#endif
+#endif /* CONFIG_HL_SUPPORT && QCA_HL_NETDEV_FLOW_CONTROL */
 
 #if defined(CONFIG_HL_SUPPORT)
 

+ 4 - 0
core/dp/txrx/ol_tx.h

@@ -226,6 +226,10 @@ static inline void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
 }
 #endif
 
+#ifdef QCA_HL_NETDEV_FLOW_CONTROL
+bool ol_tx_desc_is_high_prio(qdf_nbuf_t msdu);
+#endif
+
 #if defined(HELIUMPLUS)
 void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);
 #else

+ 2 - 2
core/dp/txrx/ol_tx_desc.c

@@ -95,7 +95,7 @@ ol_tx_desc_vdev_update(struct ol_tx_desc_t *tx_desc,
 	tx_desc->vdev_id = vdev->vdev_id;
 }
 
-#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
+#ifdef QCA_HL_NETDEV_FLOW_CONTROL
 
 /**
  * ol_tx_desc_count_inc() - tx desc count increment for desc allocation.
@@ -279,7 +279,7 @@ ol_tx_desc_alloc_hl(struct ol_txrx_pdev_t *pdev,
 	return tx_desc;
 }
 
-#if defined(CONFIG_PER_VDEV_TX_DESC_POOL) && defined(CONFIG_HL_SUPPORT)
+#ifdef QCA_HL_NETDEV_FLOW_CONTROL
 
 /**
  * ol_tx_desc_vdev_rm() - decrement the tx desc count for vdev.

+ 11 - 6
core/dp/txrx/ol_txrx.c

@@ -2287,26 +2287,31 @@ static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
 	qdf_mem_free(pdev);
 }
 
-#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
 
 /**
- * ol_txrx_vdev_tx_desc_cnt_init() - initialise tx descriptor count for vdev
+ * ol_txrx_vdev_per_vdev_tx_desc_init() - initialise per vdev tx desc count
+ * related variables.
  * @vdev: the virtual device object
  *
  * Return: None
  */
 static inline void
-ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
+ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
 {
 	qdf_atomic_init(&vdev->tx_desc_count);
+	vdev->tx_desc_limit = 0;
+	vdev->queue_restart_th = 0;
+	vdev->prio_q_paused = 0;
+	vdev->queue_stop_th = 0;
 }
 #else
 
 static inline void
-ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
+ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
 {
 }
-#endif
+#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
 
 /**
  * ol_txrx_vdev_attach - Allocate and initialize the data object
@@ -2349,7 +2354,7 @@ ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
 	vdev->fwd_tx_packets = 0;
 	vdev->fwd_rx_packets = 0;
 
-	ol_txrx_vdev_tx_desc_cnt_init(vdev);
+	ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
 
 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
 		     OL_TXRX_MAC_ADDR_LEN);

+ 6 - 2
core/dp/txrx/ol_txrx_types.h

@@ -1117,9 +1117,13 @@ struct ol_txrx_vdev_t {
 	bool hlTdlsFlag;
 #endif
 
-#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+#if defined(QCA_HL_NETDEV_FLOW_CONTROL)
 	qdf_atomic_t tx_desc_count;
-#endif
+	int tx_desc_limit;
+	int queue_restart_th;
+	int queue_stop_th;
+	int prio_q_paused;
+#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
 
 	uint16_t wait_on_peer_id;
 	union ol_txrx_align_mac_addr_t last_peer_mac_addr;
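
These new fields are only zero-initialized in this change (a zero tx_desc_limit keeps flow control off). Purely as an illustration, one plausible way the two thresholds could later be derived from the per-vdev limit is sketched below; the 80%/60% split and the helper name are assumptions, not part of this change.

/* Hypothetical derivation of the per-vdev thresholds from tx_desc_limit;
 * the percentages are illustrative assumptions only.
 */
#define TOY_STOP_PCT	80
#define TOY_RESTART_PCT	60

void toy_set_tx_desc_limit(int tx_desc_limit,
			   int *queue_stop_th, int *queue_restart_th)
{
	*queue_stop_th = (tx_desc_limit * TOY_STOP_PCT) / 100;
	*queue_restart_th = (tx_desc_limit * TOY_RESTART_PCT) / 100;
}

With a limit of 80 descriptors this would give a stop threshold of 64 and a restart threshold of 48.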