Browse Source

qcacld-3.0: Send ARP/NS/NA packets from HI PRIO queue

ARP/NS/NA packets will not get a chance to be transmitted if
the netif queues are paused due to low tx descriptor availability.
Send ARP/NS/NA packets from the HI PRIO queue so that they can still
be transmitted in a noisy environment.

Change-Id: Ibb414463d1471c19d5db99e12517c77b02564318
CRs-Fixed: 2081348
Rakesh Pillai 7 years ago
parent
commit
3e534db38b
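
The change works in two stages: when descriptors run low, only the non-priority
queues are paused and the HI PRIO queue keeps draining; only when descriptors
fall further, below a second, lower threshold, is the priority queue paused as
well. A minimal, self-contained sketch of that alloc-path state machine follows;
the harness (struct fields, threshold values, printf tracing) is invented for
illustration, and only the transition conditions mirror the first hunk below.

/*
 * Illustrative harness for the two-stage pause; only the transition
 * conditions are taken from ol_tx_desc_alloc() below, the rest is
 * hypothetical.
 */
#include <stdio.h>

enum flow_pool_status {
	FLOW_POOL_ACTIVE_UNPAUSED,
	FLOW_POOL_NON_PRIO_PAUSED,	/* non-priority queues paused */
	FLOW_POOL_ACTIVE_PAUSED,	/* priority queue paused too */
};

struct pool {
	int avail_desc, stop_th, stop_priority_th;
	enum flow_pool_status status;
};

static void on_desc_alloc(struct pool *p)
{
	p->avail_desc--;
	if (p->avail_desc < p->stop_th &&
	    p->avail_desc >= p->stop_priority_th &&
	    p->status == FLOW_POOL_ACTIVE_UNPAUSED) {
		p->status = FLOW_POOL_NON_PRIO_PAUSED;
		printf("pause non-priority queues (avail=%d)\n", p->avail_desc);
	} else if (p->avail_desc < p->stop_priority_th &&
		   p->status == FLOW_POOL_NON_PRIO_PAUSED) {
		p->status = FLOW_POOL_ACTIVE_PAUSED;
		printf("pause priority queue too (avail=%d)\n", p->avail_desc);
	}
}

int main(void)
{
	struct pool p = { 40, 30, 20, FLOW_POOL_ACTIVE_UNPAUSED };

	while (p.avail_desc > 10)
		on_desc_alloc(&p);	/* non-prio pauses at 29, prio at 19 */
	return 0;
}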

+ 44 - 22
core/dp/txrx/ol_tx_desc.c

@@ -191,33 +191,46 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 {
 	struct ol_tx_desc_t *tx_desc = NULL;
 
-	if (pool) {
-		qdf_spin_lock_bh(&pool->flow_pool_lock);
-		if (pool->avail_desc) {
-			tx_desc = ol_tx_get_desc_flow_pool(pool);
-			ol_tx_desc_dup_detect_set(pdev, tx_desc);
-			if (qdf_unlikely(pool->avail_desc < pool->stop_th)) {
-				pool->status = FLOW_POOL_ACTIVE_PAUSED;
-				qdf_spin_unlock_bh(&pool->flow_pool_lock);
-				/* pause network queues */
-				pdev->pause_cb(vdev->vdev_id,
-					       WLAN_STOP_ALL_NETIF_QUEUE,
-					       WLAN_DATA_FLOW_CONTROL);
-			} else {
-				qdf_spin_unlock_bh(&pool->flow_pool_lock);
-			}
-			ol_tx_desc_sanity_checks(pdev, tx_desc);
-			ol_tx_desc_compute_delay(tx_desc);
-			ol_tx_desc_vdev_update(tx_desc, vdev);
-			qdf_atomic_inc(&tx_desc->ref_cnt);
+	if (!pool) {
+		pdev->pool_stats.pkt_drop_no_pool++;
+		goto end;
+	}
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->avail_desc) {
+		tx_desc = ol_tx_get_desc_flow_pool(pool);
+		ol_tx_desc_dup_detect_set(pdev, tx_desc);
+		if (qdf_unlikely(pool->avail_desc < pool->stop_th &&
+				(pool->avail_desc >= pool->stop_priority_th) &&
+				(pool->status == FLOW_POOL_ACTIVE_UNPAUSED))) {
+			pool->status = FLOW_POOL_NON_PRIO_PAUSED;
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
+			/* pause network NON PRIORITY queues */
+			pdev->pause_cb(vdev->vdev_id,
+				       WLAN_STOP_NON_PRIORITY_QUEUE,
+				       WLAN_DATA_FLOW_CONTROL);
+		} else if (qdf_unlikely((pool->avail_desc <
+						pool->stop_priority_th) &&
+				pool->status == FLOW_POOL_NON_PRIO_PAUSED)) {
+			pool->status = FLOW_POOL_ACTIVE_PAUSED;
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
+			/* pause priority queue */
+			pdev->pause_cb(vdev->vdev_id,
+				       WLAN_NETIF_PRIORITY_QUEUE_OFF,
+				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
 		} else {
-			pool->pkt_drop_no_desc++;
 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
 		}
+		ol_tx_desc_sanity_checks(pdev, tx_desc);
+		ol_tx_desc_compute_delay(tx_desc);
+		ol_tx_desc_vdev_update(tx_desc, vdev);
+		qdf_atomic_inc(&tx_desc->ref_cnt);
 	} else {
-		pdev->pool_stats.pkt_drop_no_pool++;
+		pool->pkt_drop_no_desc++;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 	}
 
+end:
 	return tx_desc;
 }
 
@@ -452,9 +465,18 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 	ol_tx_put_desc_flow_pool(pool, tx_desc);
 	switch (pool->status) {
 	case FLOW_POOL_ACTIVE_PAUSED:
+		if (pool->avail_desc > pool->start_priority_th) {
+			/* unpause priority queue */
+			pdev->pause_cb(pool->member_flow_id,
+			       WLAN_NETIF_PRIORITY_QUEUE_ON,
+			       WLAN_DATA_FLOW_CONTROL_PRIORITY);
+			pool->status = FLOW_POOL_NON_PRIO_PAUSED;
+		}
+		break;
+	case FLOW_POOL_NON_PRIO_PAUSED:
 		if (pool->avail_desc > pool->start_th) {
 			pdev->pause_cb(pool->member_flow_id,
-				       WLAN_WAKE_ALL_NETIF_QUEUE,
+				       WLAN_WAKE_NON_PRIORITY_QUEUE,
 				       WLAN_DATA_FLOW_CONTROL);
 			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
 		}
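
The free path in the second hunk runs the same machine in reverse: the priority
queue wakes first, the non-priority queues last, and because the start
thresholds sit above the stop thresholds the queues do not flap. Extending the
sketch from the commit header (same hypothetical harness, with the start
thresholds passed in for brevity):

static void on_desc_free(struct pool *p, int start_priority_th, int start_th)
{
	p->avail_desc++;
	switch (p->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (p->avail_desc > start_priority_th) {
			/* unpause priority queue, as in ol_tx_desc_free() */
			p->status = FLOW_POOL_NON_PRIO_PAUSED;
			printf("wake priority queue (avail=%d)\n",
			       p->avail_desc);
		}
		break;
	case FLOW_POOL_NON_PRIO_PAUSED:
		if (p->avail_desc > start_th) {
			p->status = FLOW_POOL_ACTIVE_UNPAUSED;
			printf("wake non-priority queues (avail=%d)\n",
			       p->avail_desc);
		}
		break;
	default:
		break;
	}
}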

+ 47 - 26
core/dp/txrx/ol_txrx_flow_control.c

@@ -271,6 +271,26 @@ QDF_STATUS ol_tx_dec_pool_ref(struct ol_tx_flow_pool_t *pool, bool force)
 	return  QDF_STATUS_SUCCESS;
 }
 
+/**
+ * ol_tx_flow_pool_status_to_str() - convert flow pool status to string
+ * @status - flow pool status
+ *
+ * Returns: String corresponding to flow pool status
+ */
+static const char *ol_tx_flow_pool_status_to_str
+					(enum flow_pool_status status)
+{
+	switch (status) {
+	CASE_RETURN_STRING(FLOW_POOL_ACTIVE_UNPAUSED);
+	CASE_RETURN_STRING(FLOW_POOL_ACTIVE_PAUSED);
+	CASE_RETURN_STRING(FLOW_POOL_NON_PRIO_PAUSED);
+	CASE_RETURN_STRING(FLOW_POOL_INVALID);
+	CASE_RETURN_STRING(FLOW_POOL_INACTIVE);
+	default:
+		return "unknown";
+	}
+}
+
 /**
  * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
  *
@@ -282,23 +302,19 @@ void ol_tx_dump_flow_pool_info(void *ctx)
 	struct ol_tx_flow_pool_t *pool = NULL, *pool_prev = NULL;
 	struct ol_tx_flow_pool_t tmp_pool;
 
-	ol_txrx_dbg("Global Pool");
 	if (!pdev) {
 		ol_txrx_err("ERROR: pdev NULL");
 		QDF_ASSERT(0); /* traceback */
 		return;
 	}
-	ol_txrx_dbg("Total %d :: Available %d",
-		pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
-	ol_txrx_dbg("Invalid flow_pool %d",
-		pdev->tx_desc.num_invalid_bin);
-	ol_txrx_dbg("No of pool map received %d",
-		pdev->pool_stats.pool_map_count);
-	ol_txrx_dbg("No of pool unmap received %d",
-		pdev->pool_stats.pool_unmap_count);
-	ol_txrx_dbg(
-		"Pkt dropped due to unavailablity of pool %d",
-		pdev->pool_stats.pkt_drop_no_pool);
+	ol_txrx_info("Global total %d :: avail %d invalid flow_pool %d "
+			"maps %d pool unmaps %d pkt drops %d",
+			pdev->tx_desc.pool_size,
+			pdev->tx_desc.num_free,
+			pdev->tx_desc.num_invalid_bin,
+			pdev->pool_stats.pool_map_count,
+			pdev->pool_stats.pool_unmap_count,
+			pdev->pool_stats.pkt_drop_no_pool);
 
 	/*
 	 * Nested spin lock.
@@ -317,23 +333,20 @@ void ol_tx_dump_flow_pool_info(void *ctx)
 		if (pool_prev)
 			ol_tx_dec_pool_ref(pool_prev, false);
 
-		ol_txrx_dbg("\n");
-		ol_txrx_dbg(
-			"Flow_pool_id %d :: status %d",
-			tmp_pool.flow_pool_id, tmp_pool.status);
-		ol_txrx_dbg(
-			"Total %d :: Available %d :: Deficient %d",
-			tmp_pool.flow_pool_size, tmp_pool.avail_desc,
-			tmp_pool.deficient_desc);
-		ol_txrx_dbg(
-			"Start threshold %d :: Stop threshold %d",
-			 tmp_pool.start_th, tmp_pool.stop_th);
-		ol_txrx_dbg(
-			"Member flow_id  %d :: flow_type %d",
+		ol_txrx_info("flow_pool_id %d ::", tmp_pool.flow_pool_id);
+		ol_txrx_info("status %s flow_id %d flow_type %d",
+			ol_tx_flow_pool_status_to_str(tmp_pool.status),
 			tmp_pool.member_flow_id, tmp_pool.flow_type);
 		ol_txrx_dbg(
-			"Pkt dropped due to unavailablity of descriptors %d",
+			"total %d :: available %d :: deficient %d :: "
+			"pkt dropped (no desc) %d",
+			tmp_pool.flow_pool_size, tmp_pool.avail_desc,
+			tmp_pool.deficient_desc,
 			tmp_pool.pkt_drop_no_desc);
+		ol_txrx_info(
+			"thresh: start %d stop %d prio start %d prio stop %d",
+			 tmp_pool.start_th, tmp_pool.stop_th,
+			 tmp_pool.start_priority_th, tmp_pool.stop_priority_th);
 
 		pool_prev = pool;
 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
@@ -510,6 +523,14 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
 	pool->start_th = (start_threshold * flow_pool_size)/100;
 	pool->stop_th = (stop_threshold * flow_pool_size)/100;
+	pool->stop_priority_th = (TX_PRIORITY_TH * pool->stop_th)/100;
+	if (pool->stop_priority_th >= MAX_TSO_SEGMENT_DESC)
+		pool->stop_priority_th -= MAX_TSO_SEGMENT_DESC;
+
+	pool->start_priority_th = (TX_PRIORITY_TH * pool->start_th)/100;
+	if (pool->start_priority_th >= MAX_TSO_SEGMENT_DESC)
+		pool->start_priority_th -= MAX_TSO_SEGMENT_DESC;
+
 	qdf_spinlock_create(&pool->flow_pool_lock);
 	qdf_atomic_init(&pool->ref_cnt);
 	ol_tx_inc_pool_ref(pool);
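
ol_tx_create_flow_pool() derives the priority thresholds from the existing
start/stop thresholds and then subtracts headroom for one worst-case TSO burst.
A worked example with assumed numbers (the pool size and the start/stop
percentages are illustrative, not driver defaults):

#include <stdio.h>

#define TX_PRIORITY_TH		80	/* % of start/stop thresholds */
#define MAX_TSO_SEGMENT_DESC	44	/* descriptors for one 64K TSO packet */

int main(void)
{
	int flow_pool_size = 1024;	/* assumed pool size */
	int start_threshold = 20;	/* assumed %, caller-provided in the driver */
	int stop_threshold = 15;	/* assumed % */

	int start_th = (start_threshold * flow_pool_size) / 100;	/* 204 */
	int stop_th = (stop_threshold * flow_pool_size) / 100;		/* 153 */
	int stop_priority_th = (TX_PRIORITY_TH * stop_th) / 100;	/* 122 */
	int start_priority_th = (TX_PRIORITY_TH * start_th) / 100;	/* 163 */

	/* leave room for one jumbo TSO packet, as the diff does */
	if (stop_priority_th >= MAX_TSO_SEGMENT_DESC)
		stop_priority_th -= MAX_TSO_SEGMENT_DESC;		/* 78 */
	if (start_priority_th >= MAX_TSO_SEGMENT_DESC)
		start_priority_th -= MAX_TSO_SEGMENT_DESC;		/* 119 */

	printf("stop %d, stop_prio %d, start %d, start_prio %d\n",
	       stop_th, stop_priority_th, start_th, start_priority_th);
	return 0;
}

With these assumed numbers the non-priority queues pause once 153 descriptors
remain, while the HI PRIO queue keeps transmitting until only 78 remain.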

+ 23 - 2
core/dp/txrx/ol_txrx_types.h

@@ -407,6 +407,21 @@ enum throttle_phase {
 
 #define THROTTLE_TX_THRESHOLD (100)
 
+/*
+ * Thresholds to stop/start the priority queue, expressed as a % of the
+ * actual flow start and stop thresholds. When the number of available
+ * descriptors falls below stop_priority_th, the priority queue is
+ * paused. When it rises above start_priority_th, the priority queue is
+ * un-paused.
+ */
+#define TX_PRIORITY_TH   (80)
+
+/*
+ * Maximum number of descriptors used by a TSO jumbo packet with
+ * 64K aggregation.
+ */
+#define MAX_TSO_SEGMENT_DESC (44)
+
 typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *usr_ctxt);
 
 struct ol_tx_queue_group_t {
@@ -437,12 +452,14 @@ struct ol_tx_group_credit_stats_t {
  *			   and network queues are paused
  * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
  * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
+ * @FLOW_POOL_NON_PRIO_PAUSED: non-priority queues are paused
  */
 enum flow_pool_status {
 	FLOW_POOL_ACTIVE_UNPAUSED = 0,
 	FLOW_POOL_ACTIVE_PAUSED = 1,
-	FLOW_POOL_INVALID = 2,
-	FLOW_POOL_INACTIVE = 3,
+	FLOW_POOL_NON_PRIO_PAUSED = 2,
+	FLOW_POOL_INVALID = 3,
+	FLOW_POOL_INACTIVE = 4
 };
 
 /**
@@ -473,6 +490,8 @@ struct ol_txrx_pool_stats {
  * @freelist: tx descriptor freelist
  * @pkt_drop_no_desc: drop due to no descriptors
  * @ref_cnt: pool's ref count
+ * @stop_priority_th: Threshold to stop priority queue
+ * @start_priority_th: Threshold to start priority queue
  */
 struct ol_tx_flow_pool_t {
 	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
@@ -489,6 +508,8 @@ struct ol_tx_flow_pool_t {
 	union ol_tx_desc_list_elem_t *freelist;
 	uint16_t pkt_drop_no_desc;
 	qdf_atomic_t ref_cnt;
+	uint16_t stop_priority_th;
+	uint16_t start_priority_th;
 };
 
 #endif

+ 12 - 0
core/hdd/inc/wlan_hdd_wmm.h

@@ -96,6 +96,18 @@ enum hdd_wmm_user_mode {
 #define HDD_AC_BK 0x4
 #define HDD_AC_BE 0x8
 
+/**
+ * enum hdd_wmm_linuxac - AC/queue index values for the Linux qdisc to
+ * operate on different traffic classes.
+ */
+enum hdd_wmm_linuxac {
+	HDD_LINUX_AC_VO = 0,
+	HDD_LINUX_AC_VI = 1,
+	HDD_LINUX_AC_BE = 2,
+	HDD_LINUX_AC_BK = 3,
+	HDD_LINUX_AC_HI_PRIO = 4,
+};
+
 /**
  * struct hdd_wmm_qos_context - HDD WMM QoS Context
  *
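
Hoisting the enum into this header lets wlan_hdd_tx_rx.c address the same
subqueue indices that the WMM code selects. That in turn requires the
net_device to be registered with enough TX subqueues to cover
HDD_LINUX_AC_HI_PRIO; a hedged sketch of such a registration (the
NUM_TX_QUEUES name, its value, and the priv size are assumptions, not part
of this diff):

#include <linux/etherdevice.h>

#define NUM_TX_QUEUES 5	/* VO, VI, BE, BK + HI_PRIO (assumed) */

static struct net_device *hdd_alloc_netdev_sketch(size_t priv_size)
{
	/* one Linux TX subqueue per hdd_wmm_linuxac value, one RX queue */
	return alloc_etherdev_mqs(priv_size, NUM_TX_QUEUES, 1);
}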

+ 79 - 0
core/hdd/src/wlan_hdd_tx_rx.c

@@ -1339,6 +1339,7 @@ const char *hdd_reason_type_to_string(enum netif_reason_type reason)
 	CASE_RETURN_STRING(WLAN_VDEV_STOP);
 	CASE_RETURN_STRING(WLAN_PEER_UNAUTHORISED);
 	CASE_RETURN_STRING(WLAN_THERMAL_MITIGATION);
+	CASE_RETURN_STRING(WLAN_DATA_FLOW_CONTROL_PRIORITY);
 	default:
 		return "Invalid";
 	}
@@ -1362,8 +1363,14 @@ const char *hdd_action_type_to_string(enum netif_action_type action)
 	CASE_RETURN_STRING(WLAN_WAKE_ALL_NETIF_QUEUE);
 	CASE_RETURN_STRING(WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER);
 	CASE_RETURN_STRING(WLAN_START_ALL_NETIF_QUEUE_N_CARRIER);
+	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE);
+	CASE_RETURN_STRING(WLAN_NETIF_TX_DISABLE_N_CARRIER);
 	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_ON);
 	CASE_RETURN_STRING(WLAN_NETIF_CARRIER_OFF);
+	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_ON);
+	CASE_RETURN_STRING(WLAN_NETIF_PRIORITY_QUEUE_OFF);
+	CASE_RETURN_STRING(WLAN_WAKE_NON_PRIORITY_QUEUE);
+	CASE_RETURN_STRING(WLAN_STOP_NON_PRIORITY_QUEUE);
 	default:
 		return "Invalid";
 	}
@@ -1381,11 +1388,15 @@ static void wlan_hdd_update_queue_oper_stats(struct hdd_adapter *adapter,
 	switch (action) {
 	case WLAN_STOP_ALL_NETIF_QUEUE:
 	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
+	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
+	case WLAN_STOP_NON_PRIORITY_QUEUE:
 		adapter->queue_oper_stats[reason].pause_count++;
 		break;
 	case WLAN_START_ALL_NETIF_QUEUE:
 	case WLAN_WAKE_ALL_NETIF_QUEUE:
 	case WLAN_START_ALL_NETIF_QUEUE_N_CARRIER:
+	case WLAN_NETIF_PRIORITY_QUEUE_ON:
+	case WLAN_WAKE_NON_PRIORITY_QUEUE:
 		adapter->queue_oper_stats[reason].unpause_count++;
 		break;
 	default:
@@ -1485,6 +1496,34 @@ static void wlan_hdd_update_pause_time(struct hdd_adapter *adapter,
 
 }
 
+/**
+ * wlan_hdd_stop_non_priority_queue() - stop non-priority queues
+ * @adapter: adapter handle
+ *
+ * Return: None
+ */
+static inline void wlan_hdd_stop_non_priority_queue(struct hdd_adapter *adapter)
+{
+	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VO);
+	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_VI);
+	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BE);
+	netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_BK);
+}
+
+/**
+ * wlan_hdd_wake_non_priority_queue() - wake non-priority queues
+ * @adapter: adapter handle
+ *
+ * Return: None
+ */
+static inline void wlan_hdd_wake_non_priority_queue(struct hdd_adapter *adapter)
+{
+	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VO);
+	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_VI);
+	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BE);
+	netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_BK);
+}
+
 /**
  * wlan_hdd_netif_queue_control() - Use for netif_queue related actions
  * @adapter: adapter handle
@@ -1529,6 +1568,35 @@ void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
 		spin_unlock_bh(&adapter->pause_map_lock);
 		break;
 
+	case WLAN_STOP_NON_PRIORITY_QUEUE:
+		spin_lock_bh(&adapter->pause_map_lock);
+		if (!adapter->pause_map) {
+			wlan_hdd_stop_non_priority_queue(adapter);
+			wlan_hdd_update_txq_timestamp(adapter->dev);
+			wlan_hdd_update_unpause_time(adapter);
+		}
+		adapter->pause_map |= (1 << reason);
+		spin_unlock_bh(&adapter->pause_map_lock);
+		break;
+
+	case WLAN_NETIF_PRIORITY_QUEUE_ON:
+		spin_lock_bh(&adapter->pause_map_lock);
+		temp_map = adapter->pause_map;
+		adapter->pause_map &= ~(1 << reason);
+		netif_wake_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
+		wlan_hdd_update_pause_time(adapter, temp_map);
+		spin_unlock_bh(&adapter->pause_map_lock);
+		break;
+
+	case WLAN_NETIF_PRIORITY_QUEUE_OFF:
+		spin_lock_bh(&adapter->pause_map_lock);
+		netif_stop_subqueue(adapter->dev, HDD_LINUX_AC_HI_PRIO);
+		wlan_hdd_update_txq_timestamp(adapter->dev);
+		wlan_hdd_update_unpause_time(adapter);
+		adapter->pause_map |= (1 << reason);
+		spin_unlock_bh(&adapter->pause_map_lock);
+		break;
+
 	case WLAN_START_ALL_NETIF_QUEUE:
 		spin_lock_bh(&adapter->pause_map_lock);
 		temp_map = adapter->pause_map;
@@ -1551,6 +1619,17 @@ void wlan_hdd_netif_queue_control(struct hdd_adapter *adapter,
 		spin_unlock_bh(&adapter->pause_map_lock);
 		break;
 
+	case WLAN_WAKE_NON_PRIORITY_QUEUE:
+		spin_lock_bh(&adapter->pause_map_lock);
+		temp_map = adapter->pause_map;
+		adapter->pause_map &= ~(1 << reason);
+		if (!adapter->pause_map) {
+			wlan_hdd_wake_non_priority_queue(adapter);
+			wlan_hdd_update_pause_time(adapter, temp_map);
+		}
+		spin_unlock_bh(&adapter->pause_map_lock);
+		break;
+
 	case WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER:
 		spin_lock_bh(&adapter->pause_map_lock);
 		if (!adapter->pause_map) {
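
End to end, the txrx layer drives these HDD cases through the pause_cb
registered against the vdev, so the action/reason pairs shown in ol_tx_desc.c
above arrive here. A schematic of one full congestion cycle from HDD's point of
view (the wrapper function is hypothetical; wlan_hdd_netif_queue_control() and
the action/reason values come from this diff):

/* Hypothetical walk-through of one congestion cycle. */
static void example_flow_control_cycle(struct hdd_adapter *adapter)
{
	/* avail_desc dropped below stop_th: only HI_PRIO keeps flowing */
	wlan_hdd_netif_queue_control(adapter, WLAN_STOP_NON_PRIORITY_QUEUE,
				     WLAN_DATA_FLOW_CONTROL);

	/* avail_desc dropped below stop_priority_th: stop HI_PRIO too */
	wlan_hdd_netif_queue_control(adapter, WLAN_NETIF_PRIORITY_QUEUE_OFF,
				     WLAN_DATA_FLOW_CONTROL_PRIORITY);

	/* recovery on the free path happens in reverse order */
	wlan_hdd_netif_queue_control(adapter, WLAN_NETIF_PRIORITY_QUEUE_ON,
				     WLAN_DATA_FLOW_CONTROL_PRIORITY);
	wlan_hdd_netif_queue_control(adapter, WLAN_WAKE_NON_PRIORITY_QUEUE,
				     WLAN_DATA_FLOW_CONTROL);
}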

+ 24 - 20
core/hdd/src/wlan_hdd_wmm.c

@@ -83,14 +83,6 @@ const uint8_t hdd_wmm_up_to_ac_map[] = {
  * operate on different traffic.
  */
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
-enum hdd_wmm_linuxac {
-	HDD_LINUX_AC_VO = 0,
-	HDD_LINUX_AC_VI = 1,
-	HDD_LINUX_AC_BE = 2,
-	HDD_LINUX_AC_BK = 3,
-	HDD_LINUX_AC_HI_PRIO = 4,
-};
-
 void wlan_hdd_process_peer_unauthorised_pause(struct hdd_adapter *adapter)
 {
 	/* Enable HI_PRIO queue */
@@ -102,13 +94,6 @@ void wlan_hdd_process_peer_unauthorised_pause(struct hdd_adapter *adapter)
 
 }
 #else
-enum hdd_wmm_linuxac {
-	HDD_LINUX_AC_VO = 0,
-	HDD_LINUX_AC_VI = 1,
-	HDD_LINUX_AC_BE = 2,
-	HDD_LINUX_AC_BK = 3
-};
-
 void wlan_hdd_process_peer_unauthorised_pause(struct hdd_adapter *adapter)
 {
 }
@@ -1610,10 +1595,11 @@ uint16_t hdd_wmm_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	enum sme_qos_wmmuptype up = SME_QOS_WMM_UP_BE;
 	uint16_t queueIndex;
-	struct hdd_adapter *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
-	bool is_eapol = false;
-	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(pAdapter);
+	struct hdd_adapter *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
+	bool is_critical = false;
+	struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
 	int status;
+	enum qdf_proto_subtype proto_subtype;
 
 	status = wlan_hdd_validate_context(hdd_ctx);
 	if (status != 0) {
@@ -1622,9 +1608,27 @@ uint16_t hdd_wmm_select_queue(struct net_device *dev, struct sk_buff *skb)
 	}
 
 	/* Get the user priority from IP header */
-	hdd_wmm_classify_pkt(pAdapter, skb, &up, &is_eapol);
+	hdd_wmm_classify_pkt(adapter, skb, &up, &is_critical);
+	spin_lock_bh(&adapter->pause_map_lock);
+	if ((adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL)) &&
+	   !(adapter->pause_map & (1 << WLAN_DATA_FLOW_CONTROL_PRIORITY))) {
+		if (qdf_nbuf_is_ipv4_arp_pkt(skb))
+			is_critical = true;
+		else if (qdf_nbuf_is_icmpv6_pkt(skb)) {
+			proto_subtype = qdf_nbuf_get_icmpv6_subtype(skb);
+			switch (proto_subtype) {
+			case QDF_PROTO_ICMPV6_NA:
+			case QDF_PROTO_ICMPV6_NS:
+				is_critical = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock_bh(&adapter->pause_map_lock);
 	skb->priority = up;
-	queueIndex = hdd_get_queue_index(skb->priority, is_eapol);
+	queueIndex = hdd_get_queue_index(skb->priority, is_critical);
 
 	return queueIndex;
 }