
qcacld-3.0: HL: Iterate over txqs of a selected category

When CONFIG_FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL is enabled, each group
has its own credit limit.
It may happen that, when the High Latency TX Scheduler selects a
category, the txq at the head belongs to a group whose credits are less
than the category's "credit_reserve". In this case the scheduler returns
without downloading any frames, although other groups may have both
credits and frames to be downloaded.
The scheduler will be called again only if there is a credit update from
the FW or a packet arrives from the network stack; only then is the next
txq, which belongs to a group with sufficient credits, picked up.
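As an illustration, a minimal sketch of the check that stalls the
pre-fix scheduler. The name head_txq_can_download is hypothetical:
group_credit stands for the credit remaining after
ol_tx_txq_group_credit_limit() clamps it to the head txq's group, and
credit_reserve for the category's reserve (category->specs.credit_reserve
in the diff below).

/*
 * Hypothetical simplification of the pre-fix gating check: when it fails
 * for the txq at the head of the category, the whole scheduling pass
 * ends, even though another group in the same category may still have
 * both credits and queued frames.
 */
static int head_txq_can_download(int group_credit, int credit_reserve)
{
	return group_credit > credit_reserve;
}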

It is seen that sometimes there is no credit update from the FW (since
the host has sufficient credits) and the network stack also does not
transmit packets, since it has already queued packets in the driver's
queues. In such a case the scheduler is not called and throughput drops
to zero, although there are enough credits on the host.

To avoid such a situation, in case the scheduler is unable to download
packets from a txq because its group does not have enough credits,
iterate over to the next txq in the chosen category and download its
packets. Exit the scheduler either once it is able to download from some
txq, OR once it is unable to download from any txq.
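Below is a simplified, self-contained model of the behaviour described
above. The array-based category and all names (select_batch, struct
group, struct txq) are assumptions made for this sketch; the real
implementation, ol_tx_sched_select_batch_wrr_adv() in the diff below,
keeps the txqs on a TAILQ and rotates starved queues to the tail.

/*
 * sched_sketch.c - simplified model of the post-fix selection pass.
 * Hypothetical types and names; not the driver's actual code.
 */
#include <stdio.h>

struct group {
	int credit;		/* credits left in this credit group */
};

struct txq {
	struct group *grp;	/* group this tx queue belongs to */
	int frames;		/* frames waiting in this queue */
};

/*
 * Walk the category's txqs until one belongs to a group with more than
 * credit_reserve credits; starved queues are skipped instead of aborting
 * the whole pass.  Returns the credits used, 0 if every group is starved.
 */
static int select_batch(struct txq *cat, int n, int credit, int credit_reserve)
{
	int i;

	for (i = 0; i < n; i++) {
		struct txq *txq = &cat[i];
		int avail = credit < txq->grp->credit ?
					credit : txq->grp->credit;

		if (avail <= credit_reserve)
			continue;	/* group is starved, try next txq */

		/* "download" one frame and charge its group */
		txq->frames--;
		txq->grp->credit--;
		return 1;		/* credits consumed by the download */
	}
	return 0;			/* no txq could be serviced */
}

int main(void)
{
	struct group g0 = { 0 };	/* starved group at the head */
	struct group g1 = { 5 };	/* group that still has credits */
	struct txq cat[3] = { { &g0, 4 }, { &g0, 2 }, { &g1, 7 } };

	/*
	 * Pre-fix, the starved head queue would have ended the pass with
	 * 0 credits used; post-fix the third queue (group g1) is serviced.
	 */
	printf("credits used: %d\n", select_batch(cat, 3, 10, 2));
	return 0;
}

In the actual patch the loop additionally remembers the first starved
txq (first_txq) so it terminates once every queue in the category has
been tried, and restores the untouched credit (credit_rem) before
examining each next queue.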

Change-Id: I6143d5c3aa40761d1997846896e5e77435252b26
CRs-Fixed: 2485819
Ajit Pal Singh, 5 years ago
Parent commit: 9a0705e818
3 changed files with 31 additions and 39 deletions
  1. core/dp/txrx/ol_tx_queue.h (+7 -6)
  2. core/dp/txrx/ol_tx_sched.c (+24 -32)
  3. core/dp/txrx/ol_txrx_types.h (+0 -1)

+ 7 - 6
core/dp/txrx/ol_tx_queue.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -456,11 +456,12 @@ static inline void ol_tx_throttle_init_period(struct cdp_pdev *ppdev,
 #endif
 
 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+
 static inline bool
-ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
-				 struct ol_tx_frms_queue_t *txq)
+ol_tx_if_iterate_next_txq(struct ol_tx_frms_queue_t *first,
+			  struct ol_tx_frms_queue_t *txq)
 {
-	return txq == pdev->tx_sched.last_used_txq;
+	return (first != txq);
 }
 
 /**
@@ -540,8 +541,8 @@ ol_tx_set_peer_group_ptr(
 #else
 
 static inline bool
-ol_tx_is_txq_last_serviced_queue(struct ol_txrx_pdev_t *pdev,
-				 struct ol_tx_frms_queue_t *txq)
+ol_tx_if_iterate_next_txq(struct ol_tx_frms_queue_t *first,
+			  struct ol_tx_frms_queue_t *txq)
 {
 	return 0;
 }

+ 24 - 32
core/dp/txrx/ol_tx_sched.c

@@ -686,7 +686,6 @@ ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
 	/* start selection from the front of the ordered list */
 	scheduler->index = 0;
-	pdev->tx_sched.last_used_txq = NULL;
 }
 
 static void
@@ -738,11 +737,12 @@ ol_tx_sched_select_batch_wrr_adv(
 	static int first = 1;
 	int category_index = 0;
 	struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
-	struct ol_tx_frms_queue_t *txq;
+	struct ol_tx_frms_queue_t *txq, *first_txq = NULL;
 	int index;
 	struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
 	int frames, bytes, used_credits = 0, tx_limit;
 	u_int16_t tx_limit_flag;
+	u32 credit_rem = credit;
 
 	/*
 	 * Just for good measure, do a sanity check that the initial credit
@@ -813,17 +813,11 @@ ol_tx_sched_select_batch_wrr_adv(
 	 */
 	txq = TAILQ_FIRST(&category->state.head);
 
-	if (txq) {
+	while (txq) {
 		TAILQ_REMOVE(&category->state.head, txq, list_elem);
 		credit = ol_tx_txq_group_credit_limit(pdev, txq, credit);
 		if (credit > category->specs.credit_reserve) {
 			credit -= category->specs.credit_reserve;
-			/*
-			 * this tx queue will download some frames,
-			 * so update last_used_txq
-			 */
-			pdev->tx_sched.last_used_txq = txq;
-
 			tx_limit = ol_tx_bad_peer_dequeue_check(txq,
 					category->specs.send_limit,
 					&tx_limit_flag);
@@ -852,33 +846,31 @@ ol_tx_sched_select_batch_wrr_adv(
 			}
 			sctx->frms += frames;
 			ol_tx_txq_group_credit_update(pdev, txq, -credit, 0);
+			break;
 		} else {
-			if (ol_tx_is_txq_last_serviced_queue(pdev, txq)) {
-				/*
-				 * The scheduler has looked at all the active
-				 * tx queues but none were able to download any
-				 * of their tx frames.
-				 * Nothing is changed, so if none were able
-				 * to download before,
-				 * they wont be able to download now.
-				 * Return that no credit has been used, which
-				 * will cause the scheduler to stop.
-				 */
+			/*
+			 * Current txq belongs to a group which does not have
+			 * enough credits,
+			 * Iterate over to next txq and see if we can download
+			 * packets from that queue.
+			 */
+			if (ol_tx_if_iterate_next_txq(first_txq, txq)) {
+				credit = credit_rem;
+				if (!first_txq)
+					first_txq = txq;
+
+				TAILQ_INSERT_TAIL(&category->state.head,
+						  txq, list_elem);
+
+				txq = TAILQ_FIRST(&category->state.head);
+			} else {
 				TAILQ_INSERT_HEAD(&category->state.head, txq,
-						  list_elem);
-				return 0;
-			}
-			TAILQ_INSERT_TAIL(&category->state.head, txq,
 					  list_elem);
-			if (!pdev->tx_sched.last_used_txq)
-				pdev->tx_sched.last_used_txq = txq;
+				break;
+			}
 		}
-		TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
-	} else {
-		used_credits = 0;
-		/* TODO: find its reason */
-		ol_txrx_err("Error, no TXQ can be popped");
-	}
+	} /* while(txq) */
+
 	return used_credits;
 }
 

+ 0 - 1
core/dp/txrx/ol_txrx_types.h

@@ -905,7 +905,6 @@ struct ol_txrx_pdev_t {
 	struct {
 		enum ol_tx_scheduler_status tx_sched_status;
 		struct ol_tx_sched_t *scheduler;
-		struct ol_tx_frms_queue_t *last_used_txq;
 	} tx_sched;
 	/*
 	 * tx_queue only applies for HL, but is defined unconditionally to avoid