Bläddra i källkod

qcacld-3.0: Add stop_th and start_th for tx flow control V1

Add stop_th and start_th for QCA_LL_TX_FLOW_CONTROL_V2 disabled
platform, which is pdev based tx_desc pool. Change pdev tx_desc pool
size from 1056 to 900; the default stop_th is 15% and start_th is 25%,
exactly the same settings as QCA_LL_TX_FLOW_CONTROL_V2. Pause netif tx
queues for all vdevs when stop_th is reached instead of dropping frames.
Reducing the pdev pool size can significantly reduce firmware WMM drops.
Both host and firmware frame drops lead to bad TCP throughput.

Change-Id: I77daf8c9fdef624f8ec479885b7705deb1fef142
CRs-Fixed: 2436772
hangtian 6 år sedan
förälder
incheckning
72704809e5

+ 0 - 2
core/cds/inc/cds_config.h

@@ -84,10 +84,8 @@ struct cds_config_info {
 	uint8_t reorder_offload;
 	uint8_t uc_offload_enabled;
 	bool enable_rxthread;
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	uint32_t tx_flow_stop_queue_th;
 	uint32_t tx_flow_start_queue_offset;
-#endif
 	uint8_t enable_dp_rx_threads;
 #ifdef WLAN_FEATURE_LPSS
 	bool is_lpass_enabled;

+ 0 - 7
core/cds/src/cds_api.c

@@ -294,7 +294,6 @@ cds_cfg_update_ac_specs_params(struct txrx_pdev_cfg_param_t *olcfg,
 	}
 }
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 static inline void
 cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
 				struct txrx_pdev_cfg_param_t *cdp_cfg)
@@ -304,12 +303,6 @@ cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
 	cdp_cfg->tx_flow_start_queue_offset =
 		cfg_get(psoc, CFG_DP_TX_FLOW_START_QUEUE_OFFSET);
 }
-#else
-static inline void
-cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
-				struct txrx_pdev_cfg_param_t *cdp_cfg)
-{}
-#endif
 
 /**
  * cds_cdp_cfg_attach() - attach data path config module

+ 6 - 12
core/dp/ol/inc/ol_cfg.h

@@ -49,6 +49,12 @@ enum wlan_frm_fmt {
 #define MAX_THROUGHPUT 800
 #endif
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+#define TARGET_TX_CREDIT CFG_TGT_NUM_MSDU_DESC
+#else
+#define TARGET_TX_CREDIT 900
+#endif
+
 /* Throttle period Different level Duty Cycle values*/
 #define THROTTLE_DUTY_CYCLE_LEVEL0 (0)
 #define THROTTLE_DUTY_CYCLE_LEVEL1 (50)
@@ -92,10 +98,8 @@ struct txrx_pdev_cfg_t {
 	bool ip_tcp_udp_checksum_offload;
 	bool enable_rxthread;
 	bool ce_classify_enabled;
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	uint32_t tx_flow_stop_queue_th;
 	uint32_t tx_flow_start_queue_offset;
-#endif
 	bool flow_steering_enabled;
 	/*
 	 * To track if credit reporting through
@@ -128,16 +132,8 @@ struct txrx_pdev_cfg_t {
  *
  * Return: none
  */
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_ctx,
 				       struct txrx_pdev_cfg_param_t *cfg_param);
-#else
-static inline
-void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_ctx,
-				       struct txrx_pdev_cfg_param_t *cfg_param)
-{
-}
-#endif
 
 /**
  * ol_pdev_cfg_attach - setup configuration parameters
@@ -490,11 +486,9 @@ int ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(struct cdp_cfg *cfg_pdev)
 }
 
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev);
 
 int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev);
-#endif
 
 bool ol_cfg_is_ce_classify_enabled(struct cdp_cfg *cfg_pdev);
 

+ 1 - 5
core/dp/txrx/ol_cfg.c

@@ -23,7 +23,6 @@
 
 unsigned int vow_config;
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 /**
  * ol_tx_set_flow_control_parameters() - set flow control parameters
  * @cfg_ctx: cfg context
@@ -41,7 +40,6 @@ void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
 	cfg_ctx->tx_flow_stop_queue_th =
 					cfg_param->tx_flow_stop_queue_th;
 }
-#endif
 
 #ifdef CONFIG_HL_SUPPORT
 
@@ -145,7 +143,7 @@ struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 	cfg_ctx->max_thruput_mbps = MAX_THROUGHPUT;
 	cfg_ctx->max_nbuf_frags = 1;
 	cfg_ctx->vow_config = vow_config;
-	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
+	cfg_ctx->target_tx_credit = TARGET_TX_CREDIT;
 	cfg_ctx->throttle_period_ms = 40;
 	cfg_ctx->dutycycle_level[0] = THROTTLE_DUTY_CYCLE_LEVEL0;
 	cfg_ctx->dutycycle_level[1] = THROTTLE_DUTY_CYCLE_LEVEL1;
@@ -409,7 +407,6 @@ int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev)
 	return cfg->enable_rxthread;
 }
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 /**
  * ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
  * @pdev : handle to the physical device
@@ -436,7 +433,6 @@ int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev)
 	return cfg->tx_flow_start_queue_offset;
 }
 
-#endif
 
 #ifdef IPA_OFFLOAD
 unsigned int ol_cfg_ipa_uc_offload_enabled(struct cdp_cfg *cfg_pdev)

+ 61 - 0
core/dp/txrx/ol_tx_desc.c

@@ -142,6 +142,7 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 					     struct ol_txrx_vdev_t *vdev)
 {
 	struct ol_tx_desc_t *tx_desc = NULL;
+	struct ol_txrx_vdev_t *vd;
 
 	qdf_spin_lock_bh(&pdev->tx_mutex);
 	if (pdev->tx_desc.freelist) {
@@ -151,6 +152,31 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 			return NULL;
 		}
 		ol_tx_desc_dup_detect_set(pdev, tx_desc);
+		if (qdf_unlikely(pdev->tx_desc.num_free <
+					pdev->tx_desc.stop_th &&
+				pdev->tx_desc.num_free >=
+				 pdev->tx_desc.stop_priority_th &&
+				pdev->tx_desc.status ==
+				 FLOW_POOL_ACTIVE_UNPAUSED)) {
+			pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
+			/* pause network NON PRIORITY queues */
+			TAILQ_FOREACH(vd, &pdev->vdev_list, vdev_list_elem) {
+				pdev->pause_cb(vd->vdev_id,
+					       WLAN_STOP_NON_PRIORITY_QUEUE,
+					       WLAN_DATA_FLOW_CONTROL);
+			}
+		} else if (qdf_unlikely((pdev->tx_desc.num_free <
+					 pdev->tx_desc.stop_priority_th) &&
+				pdev->tx_desc.status ==
+				FLOW_POOL_NON_PRIO_PAUSED)) {
+			pdev->tx_desc.status = FLOW_POOL_ACTIVE_PAUSED;
+			/* pause priority queue */
+			TAILQ_FOREACH(vd, &pdev->vdev_list, vdev_list_elem) {
+				pdev->pause_cb(vd->vdev_id,
+					       WLAN_NETIF_PRIORITY_QUEUE_OFF,
+					       WLAN_DATA_FLOW_CONTROL_PRIORITY);
+			}
+		}
 		ol_tx_desc_sanity_checks(pdev, tx_desc);
 		ol_tx_desc_compute_delay(tx_desc);
 		ol_tx_desc_vdev_update(tx_desc, vdev);
@@ -440,6 +466,8 @@ static void ol_tx_desc_free_common(struct ol_txrx_pdev_t *pdev,
  */
 void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 {
+	struct ol_txrx_vdev_t *vdev;
+
 	qdf_spin_lock_bh(&pdev->tx_mutex);
 
 	ol_tx_desc_free_common(pdev, tx_desc);
@@ -447,6 +475,39 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 	ol_tx_put_desc_global_pool(pdev, tx_desc);
 	ol_tx_desc_vdev_rm(tx_desc);
 
+	switch (pdev->tx_desc.status) {
+	case FLOW_POOL_ACTIVE_PAUSED:
+		if (pdev->tx_desc.num_free > pdev->tx_desc.start_priority_th) {
+			/* unpause priority queue */
+			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+				pdev->pause_cb(vdev->vdev_id,
+				       WLAN_NETIF_PRIORITY_QUEUE_ON,
+				       WLAN_DATA_FLOW_CONTROL_PRIORITY);
+			}
+			pdev->tx_desc.status = FLOW_POOL_NON_PRIO_PAUSED;
+		}
+		break;
+	case FLOW_POOL_NON_PRIO_PAUSED:
+		if (pdev->tx_desc.num_free > pdev->tx_desc.start_th) {
+			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+				pdev->pause_cb(vdev->vdev_id,
+					       WLAN_WAKE_NON_PRIORITY_QUEUE,
+					       WLAN_DATA_FLOW_CONTROL);
+			}
+			pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
+		}
+		break;
+	case FLOW_POOL_INVALID:
+		if (pdev->tx_desc.num_free == pdev->tx_desc.pool_size)
+			ol_txrx_err("pool is INVALID State!!");
+		break;
+	case FLOW_POOL_ACTIVE_UNPAUSED:
+		break;
+	default:
+		ol_txrx_err("pool is INACTIVE State!!\n");
+		break;
+	};
+
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
 }
 

+ 39 - 1
core/dp/txrx/ol_txrx.c

@@ -59,6 +59,7 @@
 #include <ol_tx_sched.h>           /* ol_tx_sched_attach, etc. */
 #include <ol_txrx.h>
 #include <ol_txrx_types.h>
+#include <ol_cfg.h>
 #include <cdp_txrx_flow_ctrl_legacy.h>
 #include <cdp_txrx_bus.h>
 #include <cdp_txrx_ipa.h>
@@ -869,6 +870,8 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 	union ol_tx_desc_list_elem_t *c_element;
 	unsigned int sig_bit;
 	uint16_t desc_per_page;
+	uint32_t stop_threshold;
+	uint32_t start_threshold;
 
 	if (!osc) {
 		ret = -EINVAL;
@@ -1012,6 +1015,22 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 		    (uint32_t *)pdev->tx_desc.freelist,
 		    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));
 
+	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
+	start_threshold = stop_threshold +
+		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
+	pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
+	pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
+	pdev->tx_desc.stop_priority_th =
+		(TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
+	if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
+		pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;
+
+	pdev->tx_desc.start_priority_th =
+		(TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
+	if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
+		pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
+	pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
+
 	/* check what format of frames are expected to be delivered by the OS */
 	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
 	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
@@ -4858,6 +4877,25 @@ exit:
 	return rc;
 }
 
+/**
+ * ol_txrx_register_pause_cb() - register pause callback
+ * @pause_cb: pause callback
+ *
+ * Return: QDF status
+ */
+static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
+					    tx_pause_callback pause_cb)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev || !pause_cb) {
+		ol_txrx_err("pdev or pause_cb is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+	pdev->pause_cb = pause_cb;
+	return QDF_STATUS_SUCCESS;
+}
+
 #ifdef RECEIVE_OFFLOAD
 /**
  * ol_txrx_offld_flush_handler() - offld flush handler
@@ -5583,8 +5621,8 @@ static struct cdp_misc_ops ol_ops_misc = {
 };
 
 static struct cdp_flowctl_ops ol_ops_flowctl = {
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	.register_pause_cb = ol_txrx_register_pause_cb,
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	.set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
 	.dump_flow_pool_info = ol_tx_dump_flow_pool_info,
 	.tx_desc_thresh_reached = ol_tx_desc_thresh_reached,

+ 0 - 2
core/dp/txrx/ol_txrx.h

@@ -296,8 +296,6 @@ uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
 uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);
-QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
-				     tx_pause_callback pause_cb);
 /**
  * ol_txrx_fwd_desc_thresh_check() - check to forward packet to tx path
  * @vdev: which virtual device the frames were addressed to

+ 0 - 19
core/dp/txrx/ol_txrx_flow_control.c

@@ -105,25 +105,6 @@ bool ol_txrx_fwd_desc_thresh_check(struct cdp_vdev *vdev)
 	return enough_desc_flag;
 }
 
-/**
- * ol_txrx_register_pause_cb() - register pause callback
- * @pause_cb: pause callback
- *
- * Return: QDF status
- */
-QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
-				     tx_pause_callback pause_cb)
-{
-	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
-
-	if (!pdev || !pause_cb) {
-		ol_txrx_err("pdev or pause_cb is NULL");
-		return QDF_STATUS_E_INVAL;
-	}
-	pdev->pause_cb = pause_cb;
-	return QDF_STATUS_SUCCESS;
-}
-
 /**
  * ol_tx_set_desc_global_pool_size() - set global pool size
  * @num_msdu_desc: total number of descriptors

+ 5 - 2
core/dp/txrx/ol_txrx_types.h

@@ -453,7 +453,6 @@ struct ol_tx_group_credit_stats_t {
 	u_int16_t wrap_around;
 };
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 
 /**
  * enum flow_pool_status - flow pool status
@@ -527,7 +526,6 @@ struct ol_tx_flow_pool_t {
 	uint16_t start_priority_th;
 };
 
-#endif
 
 #define OL_TXRX_INVALID_PEER_UNMAP_COUNT 0xF
 /*
@@ -774,6 +772,11 @@ struct ol_txrx_pdev_t {
 #ifdef DESC_DUP_DETECT_DEBUG
 		unsigned long *free_list_bitmap;
 #endif
+		uint16_t stop_th;
+		uint16_t start_th;
+		uint16_t stop_priority_th;
+		uint16_t start_priority_th;
+		enum flow_pool_status status;
 	} tx_desc;
 
 	uint8_t is_mgmt_over_wmi_enabled;

+ 0 - 10
core/hdd/src/wlan_hdd_main.c

@@ -9693,7 +9693,6 @@ int hdd_start_ap_adapter(struct hdd_adapter *adapter)
 	return 0;
 }
 
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 /**
  * hdd_txrx_populate_cds_config() - Populate txrx cds configuration
  * @cds_cfg: CDS Configuration
@@ -9712,13 +9711,6 @@ static inline void hdd_txrx_populate_cds_config(struct cds_config_info
 	/* configuration for DP RX Threads */
 	cds_cfg->enable_dp_rx_threads = hdd_ctx->enable_dp_rx_threads;
 }
-#else
-static inline void hdd_txrx_populate_cds_config(struct cds_config_info
-						*cds_cfg,
-						struct hdd_context *hdd_ctx)
-{
-}
-#endif
 
 /**
  * hdd_update_cds_config() - API to update cds configuration parameters
@@ -14176,13 +14168,11 @@ static int hdd_update_dp_config(struct hdd_context *hdd_ctx)
 	soc = cds_get_context(QDF_MODULE_ID_SOC);
 	params.tso_enable = cfg_get(hdd_ctx->psoc, CFG_DP_TSO);
 	params.lro_enable = cfg_get(hdd_ctx->psoc, CFG_DP_LRO);
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	params.tx_flow_stop_queue_threshold =
 			cfg_get(hdd_ctx->psoc, CFG_DP_TX_FLOW_STOP_QUEUE_TH);
 	params.tx_flow_start_queue_offset =
 			cfg_get(hdd_ctx->psoc,
 				CFG_DP_TX_FLOW_START_QUEUE_OFFSET);
-#endif
 	params.flow_steering_enable =
 		cfg_get(hdd_ctx->psoc, CFG_DP_FLOW_STEERING_ENABLED);
 	params.napi_enable = hdd_ctx->napi_enable;