浏览代码

qcacld-3.0: Relocate HDD DP ini items

Relocate below HDD DP ini items as per converged infrastructure.

1. TxFlowLowWaterMark
2. TxFlowHighWaterMarkOffset
3. TxFlowMaxQueueDepth
4. TxLbwFlowLowWaterMark
5. TxLbwFlowHighWaterMarkOffset
6. TxLbwFlowMaxQueueDepth
7. TxHbwFlowLowWaterMark
8. TxHbwFlowHighWaterMarkOffset
9. TxHbwFlowMaxQueueDepth
10. gBusBandwidthHighThreshold
11. gBusBandwidthMediumThreshold
12. gBusBandwidthLowThreshold
13. gBusBandwidthComputeInterval
14. gTcpLimitOutputEnable
15. gTcpAdvWinScaleEnable
16. gTcpDelAckEnable
17. gTcpDelAckThresholdHigh
18. gTcpDelAckThresholdLow
19. gTcpDelAckTimerCount
20. gTcpTxHighTputThreshold
21. gEnableTxOrphan
22. rpsRxQueueCpuMapList
23. RX_THREAD_CPU_AFFINITY_MASK
24. NAPI_CPU_AFFINITY_MASK
25. rx_mode

Change-Id: Ib7309fc78842af4bae4d8f705edecce4499a17df
CRs-Fixed: 2323191
jitiphil 6 年之前
父节点
当前提交
869b9f73d9

+ 2 - 0
components/cfg/cfg_all.h

@@ -41,12 +41,14 @@
 
 #include "wlan_pmo_cfg.h"
 #include "hdd_config.h"
+#include "hdd_dp_cfg.h"
 
 /* Maintain Alphabetic order here while adding components */
 #define CFG_ALL \
 	CFG_CONVERGED_ALL \
 	CFG_FWOL_ALL \
 	CFG_HDD_ALL \
+	CFG_HDD_DP_ALL \
 	CFG_MLME_ALL \
 	CFG_NAN_ALL \
 	CFG_P2P_ALL \

+ 797 - 0
core/hdd/inc/hdd_dp_cfg.h

@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: This file contains centralized definitions of converged configuration.
+ */
+
+#ifndef __HDD_DP_CONFIG_H
+#define __HDD_DP_CONFIG_H
+
+#define CFG_ENABLE_RX_THREAD		BIT(0)
+#define CFG_ENABLE_RPS			BIT(1)
+#define CFG_ENABLE_NAPI			BIT(2)
+#define CFG_ENABLE_DYNAMIC_RPS		BIT(3)
+#define CFG_ENABLE_DP_RX_THREADS	BIT(4)
+#define CFG_RX_MODE_MAX (CFG_ENABLE_RX_THREAD | \
+					  CFG_ENABLE_RPS | \
+					  CFG_ENABLE_NAPI | \
+					  CFG_ENABLE_DYNAMIC_RPS | \
+					  CFG_ENABLE_DP_RX_THREADS)
+#ifdef MDM_PLATFORM
+#define CFG_RX_MODE_DEFAULT 0
+#elif defined(HELIUMPLUS)
+#define CFG_RX_MODE_DEFAULT CFG_ENABLE_NAPI
+#elif defined(QCA_WIFI_QCA6290_11AX)
+#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI)
+#else
+#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI)
+#endif
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+
+/*
+ * <ini>
+ * TxFlowLowWaterMark - Low watermark for pausing network queues
+ *
+ * @Min: 0
+ * @Max: 1000
+ * @Default: 300
+ *
+ * This ini specifies the low watermark of data packets transmitted
+ * before pausing netif queues in tx flow path. It is only applicable
+ * where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowHighWaterMarkOffset, TxFlowMaxQueueDepth,
+ *          TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
+ *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_FLOW_LWM \
+		CFG_INI_UINT( \
+		"TxFlowLowWaterMark", \
+		0, \
+		1000, \
+		300, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Low watermark for pausing network queues")
+
+/*
+ * <ini>
+ * TxFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
+ * @Min: 0
+ * @Max: 300
+ * @Default: 94
+ *
+ * This ini specifies the offset to unpause the netif queues
+ * when they are paused due to insufficient descriptors as guided by
+ * ini TxFlowLowWaterMark. It is only applicable where legacy flow control
+ * is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowMaxQueueDepth,
+ *          TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
+ *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_FLOW_HWM_OFFSET \
+		CFG_INI_UINT( \
+		"TxFlowHighWaterMarkOffset", \
+		0, \
+		300, \
+		94, \
+		CFG_VALUE_OR_DEFAULT, \
+		"High Watermark offset to unpause Netif queues")
+
+/*
+ * <ini>
+ * TxFlowMaxQueueDepth - Max pause queue depth.
+ *
+ * @Min: 400
+ * @Max: 3500
+ * @Default: 1500
+ *
+ * This ini specifies the max queue pause depth.It is only applicable
+ * where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
+ *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH \
+		CFG_INI_UINT( \
+		"TxFlowMaxQueueDepth", \
+		400, \
+		3500, \
+		1500, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Max pause queue depth")
+
+/*
+ * <ini>
+ * TxLbwFlowLowWaterMark - Low watermark for pausing network queues
+ *                         in low bandwidth band
+ * @Min: 0
+ * @Max: 1000
+ * @Default: 450
+ *
+ * This ini specifies the low watermark of data packets transmitted
+ * before pausing netif queues in tx flow path in low bandwidth band.
+ * It is only applicable where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxFlowMaxQueueDepth, TxLbwFlowHighWaterMarkOffset,
+ *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_LBW_FLOW_LWM \
+		CFG_INI_UINT( \
+		"TxLbwFlowLowWaterMark", \
+		0, \
+		1000, \
+		450, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Low watermark for pausing network queues")
+
+/*
+ * <ini>
+ * TxLbwFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
+ *                                in low bandwidth band.
+ * @Min: 0
+ * @Max: 300
+ * @Default: 50
+ *
+ * This ini specifies the offset to unpause the netif queues
+ * when they are paused due to insufficient descriptors as guided by
+ * ini TxLbwFlowLowWaterMark in low bandwidth band. It is only applicable
+ * where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
+ *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET \
+		CFG_INI_UINT( \
+		"TxLbwFlowHighWaterMarkOffset", \
+		0, \
+		300, \
+		50, \
+		CFG_VALUE_OR_DEFAULT, \
+		"High Watermark offset to unpause Netif queues")
+
+/*
+ * <ini>
+ * TxLbwFlowMaxQueueDepth - Max pause queue depth in low bandwidth band
+ *
+ * @Min: 400
+ * @Max: 3500
+ * @Default: 750
+ *
+ * This ini specifies the max queue pause depth in low bandwidth band.
+ * It is only applicable where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
+ *          TxLbwFlowHighWaterMarkOffset, TxHbwFlowLowWaterMark,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH \
+		CFG_INI_UINT( \
+		"TxLbwFlowMaxQueueDepth", \
+		400, \
+		3500, \
+		750, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Max pause queue depth in low bandwidth band")
+
+/*
+ * <ini>
+ * TxHbwFlowLowWaterMark - Low watermark for pausing network queues
+ *                         in high bandwidth band
+ * @Min: 0
+ * @Max: 1000
+ * @Default: 406
+ *
+ * This ini specifies the threshold of data packets transmitted
+ * before pausing netif queues.It is only applicable where
+ * legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
+ *          TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
+ *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_HBW_FLOW_LWM \
+		CFG_INI_UINT( \
+		"TxHbwFlowLowWaterMark", \
+		0, \
+		1000, \
+		406, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Low watermark for pausing network queues")
+
+/*
+ * <ini>
+ * TxHbwFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
+ *                                in high bandwidth band.
+ * @Min: 0
+ * @Max: 300
+ * @Default: 94
+ *
+ * This ini specifies the offset to unpause the netif queues
+ * when they are paused due to insufficient descriptors as guided by
+ * ini TxHbwFlowLowWaterMark in high bandwidth band. It is only applicable
+ * where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
+ *          TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
+ *          TxHbwFlowLowWaterMark, TxHbwFlowMaxQueueDepth
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET \
+		CFG_INI_UINT( \
+		"TxHbwFlowHighWaterMarkOffset", \
+		0, \
+		300, \
+		94, \
+		CFG_VALUE_OR_DEFAULT, \
+		"High Watermark offset to unpause Netif queues")
+
+/*
+ * <ini>
+ * TxHbwFlowMaxQueueDepth - Max pause queue depth in high bandwidth band
+ * @Min: 400
+ * @Max: 3500
+ * @Default: 1500
+ *
+ * This ini specifies the max queue pause depth in high bandwidth band.
+ * It is only applicable where legacy flow control is used i.e.for Rome.
+ *
+ * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
+ *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
+ *          TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
+ *          TxHbwFlowLowWaterMark, TxHbwFlowHighWaterMarkOffset
+ *
+ * Supported Feature: Dynamic Flow Control
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH \
+		CFG_INI_UINT( \
+		"TxHbwFlowMaxQueueDepth", \
+		400, \
+		3500, \
+		1500, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Max pause queue depth in high bandwidth band")
+
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+#ifdef MSM_PLATFORM
+/*
+ * <ini>
+ * gBusBandwidthHighThreshold - bus bandwidth high threshold
+ *
+ * @Min: 0
+ * @Max: 4294967295UL
+ * @Default: 2000
+ *
+ * This ini specifies the bus bandwidth high threshold
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD \
+		CFG_INI_UINT( \
+		"gBusBandwidthHighThreshold", \
+		0, \
+		4294967295UL, \
+		2000, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Bus bandwidth high threshold")
+
+/*
+ * <ini>
+ * gBusBandwidthMediumThreshold - bus bandwidth medium threshold
+ *
+ * @Min: 0
+ * @Max: 4294967295UL
+ * @Default: 500
+ *
+ * This ini specifies the bus bandwidth medium threshold
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD \
+		CFG_INI_UINT( \
+		"gBusBandwidthMediumThreshold", \
+		0, \
+		4294967295UL, \
+		500, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Bus bandwidth medium threshold")
+
+/*
+ * <ini>
+ * gBusBandwidthLowThreshold - bus bandwidth low threshold
+ *
+ * @Min: 0
+ * @Max: 4294967295UL
+ * @Default: 150
+ *
+ * This ini specifies the bus bandwidth low threshold
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD \
+		CFG_INI_UINT( \
+		"gBusBandwidthLowThreshold", \
+		0, \
+		4294967295UL, \
+		150, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Bus bandwidth low threshold")
+
+/*
+ * <ini>
+ * gBusBandwidthComputeInterval - bus bandwidth compute interval
+ *
+ * @Min: 0
+ * @Max: 10000
+ * @Default: 100
+ *
+ * This ini specifies the bus bandwidth compute interval
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL \
+		CFG_INI_UINT( \
+		"gBusBandwidthComputeInterval", \
+		0, \
+		10000, \
+		100, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Bus bandwidth compute interval")
+
+/*
+ * <ini>
+ * gTcpLimitOutputEnable - Control to enable TCP limit output byte
+ * @Default: true
+ *
+ * This ini is used to enable dynamic configuration of TCP limit output bytes
+ * tcp_limit_output_bytes param. Enabling this will let driver post message to
+ * cnss-daemon, accordingly cnss-daemon will modify the tcp_limit_output_bytes.
+ *
+ * Supported Feature: Tcp limit output bytes
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_ENABLE_TCP_LIMIT_OUTPUT \
+		CFG_INI_BOOL( \
+		"gTcpLimitOutputEnable", \
+		true, \
+		"Control to enable TCP limit output byte")
+
+/*
+ * <ini>
+ * gTcpAdvWinScaleEnable - Control to enable  TCP adv window scaling
+ * @Default: true
+ *
+ * This ini is used to enable dynamic configuration of TCP adv window scaling
+ * system parameter.
+ *
+ * Supported Feature: Tcp Advance Window Scaling
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_ENABLE_TCP_ADV_WIN_SCALE \
+		CFG_INI_BOOL( \
+		"gTcpAdvWinScaleEnable", \
+		true, \
+		"Control to enable  TCP adv window scaling")
+
+/*
+ * <ini>
+ * gTcpDelAckEnable - Control to enable Dynamic Configuration of Tcp Delayed Ack
+ * @Default: true
+ *
+ * This ini is used to enable Dynamic Configuration of Tcp Delayed Ack
+ *
+ * Related: gTcpDelAckThresholdHigh, gTcpDelAckThresholdLow,
+ *          gTcpDelAckTimerCount
+ *
+ * Supported Feature: Tcp Delayed Ack
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_ENABLE_TCP_DELACK \
+		CFG_INI_BOOL( \
+		"gTcpDelAckEnable", \
+		true, \
+		"Control to enable Dynamic Config of Tcp Delayed Ack")
+
+/*
+ * <ini>
+ * gTcpDelAckThresholdHigh - High Threshold inorder to trigger TCP Del Ack
+ *                                          indication
+ * @Min: 0
+ * @Max: 16000
+ * @Default: 500
+ *
+ * This ini is used to mention the High Threshold inorder to trigger TCP Del Ack
+ * indication i.e the threshold of packets received over a period of 100 ms.
+ * i.e to have a high RX throughput requirement
+ * Related: gTcpDelAckEnable, gTcpDelAckThresholdLow, gTcpDelAckTimerCount
+ *
+ * Supported Feature: Tcp Delayed Ack
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_TCP_DELACK_THRESHOLD_HIGH \
+		CFG_INI_UINT( \
+		"gTcpDelAckThresholdHigh", \
+		0, \
+		16000, \
+		500, \
+		CFG_VALUE_OR_DEFAULT, \
+		"High Threshold inorder to trigger TCP Del Ack")
+
+/*
+ * <ini>
+ * gTcpDelAckThresholdLow - Low Threshold inorder to trigger TCP Del Ack
+ *                                          indication
+ * @Min: 0
+ * @Max: 10000
+ * @Default: 1000
+ *
+ * This ini is used to mention the Low Threshold inorder to trigger TCP Del Ack
+ * indication i.e the threshold of packets received over a period of 100 ms.
+ * i.e to have a low RX throughput requirement
+ *
+ * Related: gTcpDelAckEnable, gTcpDelAckThresholdHigh, gTcpDelAckTimerCount
+ *
+ * Supported Feature: Tcp Delayed Ack
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_TCP_DELACK_THRESHOLD_LOW \
+		CFG_INI_UINT( \
+		"gTcpDelAckThresholdLow", \
+		0, \
+		10000, \
+		1000, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Low Threshold inorder to trigger TCP Del Ack")
+
+/*
+ * <ini>
+ * gTcpDelAckTimerCount - Del Ack Timer Count inorder to trigger TCP Del Ack
+ *                                      indication
+ * @Min: 1
+ * @Max: 1000
+ * @Default: 30
+ *
+ * This ini is used to mention the Del Ack Timer Count inorder to
+ * trigger TCP Del Ack indication i.e number of 100 ms periods
+ *
+ * Related: gTcpDelAckEnable, gTcpDelAckThresholdHigh, gTcpDelAckThresholdLow
+ *
+ * Supported Feature: Tcp Delayed Ack
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_TCP_DELACK_TIMER_COUNT \
+		CFG_INI_UINT( \
+		"gTcpDelAckTimerCount", \
+		1, \
+		1000, \
+		30, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Del Ack Timer Count inorder to trigger TCP Del Ack")
+
+/*
+ * <ini>
+ * gTcpTxHighTputThreshold - High Threshold inorder to trigger High
+ *                                          Tx Throughput requirement.
+ * @Min: 0
+ * @Max: 16000
+ * @Default: 500
+ *
+ * This ini specifies the threshold of packets transmitted
+ * over a period of 100 ms beyond which TCP can be considered to have a high
+ * TX throughput requirement. The driver uses this condition to tweak TCP TX
+ * specific parameters (via cnss-daemon)
+ *
+ * Supported Feature: To tweak TCP TX n/w parameters
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD \
+		CFG_INI_UINT( \
+		"gTcpTxHighTputThreshold", \
+		0, \
+		16000, \
+		500, \
+		CFG_VALUE_OR_DEFAULT, \
+		"High Threshold inorder to trigger High Tx Tp")
+
+#endif /* MSM_PLATFORM */
+
+/*
+ * <ini>
+ * NAPI_CPU_AFFINITY_MASK - CPU mask to affine NAPIs
+ *
+ * @Min: 0
+ * @Max: 0xFF
+ * @Default: 0
+ *
+ * This ini is used to set NAPI IRQ CPU affinity
+ *
+ * Supported Feature: NAPI
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_NAPI_CE_CPU_MASK \
+		CFG_INI_UINT( \
+		"NAPI_CPU_AFFINITY_MASK", \
+		0, \
+		0xFF, \
+		0, \
+		CFG_VALUE_OR_DEFAULT, \
+		"CPU mask to affine NAPIs")
+
+/*
+ * <ini>
+ * RX_THREAD_CPU_AFFINITY_MASK - CPU mask to affine Rx_thread
+ *
+ * @Default: 0
+ *
+ * This ini is used to set Rx_thread CPU affinity
+ *
+ * Supported Feature: Rx_thread
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_RX_THREAD_CPU_MASK \
+		CFG_INI_UINT( \
+		"RX_THREAD_CPU_AFFINITY_MASK", \
+		0, \
+		0xFF, \
+		0, \
+		CFG_VALUE_OR_DEFAULT, \
+		"CPU mask to affine Rx_thread")
+
+/*
+ * <ini>
+ * rpsRxQueueCpuMapList - list of RPS CPU maps for different rx queues
+ *
+ * @Min: string length 1
+ * @Max: string length 30
+ * @Default: e
+ *
+ * This ini is used to set the RPS CPU map for different rx queues
+ *
+ * List of RPS CPU maps for different rx queues registered by WLAN driver
+ * Ref - Kernel/Documentation/networking/scaling.txt
+ * RPS CPU map for a particular RX queue, selects CPU(s) for bottom half
+ * processing of RX packets. For example, for a system with 4 CPUs,
+ * 0xe: Use CPU1 - CPU3 and donot use CPU0.
+ * 0x0: RPS is disabled, packets are processed on the interrupting CPU.
+ *
+ * WLAN driver registers NUM_TX_QUEUES queues for tx and rx each during
+ * alloc_netdev_mq. Hence, we need to have a cpu mask for each of the rx queues.
+ *
+ * For example, if the NUM_TX_QUEUES is 4, a sample WLAN ini entry may look like
+ * rpsRxQueueCpuMapList=a b c d
+ * For a 4 CPU system (CPU0 - CPU3), this implies:
+ * 0xa - (1010) use CPU1, CPU3 for rx queue 0
+ * 0xb - (1011) use CPU0, CPU1 and CPU3 for rx queue 1
+ * 0xc - (1100) use CPU2, CPU3 for rx queue 2
+ * 0xd - (1101) use CPU0, CPU2 and CPU3 for rx queue 3
+ *
+ * In practice, we may want to avoid the cores which are heavily loaded.
+ *
+ * Default value of rpsRxQueueCpuMapList. Different platforms may have
+ * different configurations for NUM_TX_QUEUES and # of cpus, and will need to
+ * configure an appropriate value via ini file. Setting default value to 'e' to
+ * avoid use of CPU0 (since its heavily used by other system processes) by rx
+ * queue 0, which is currently being used for rx packet processing.
+ *
+ * Maximum length of string used to hold a list of cpu maps for various rx
+ * queues. Considering a 16 core system with 5 rx queues, a RPS CPU map
+ * list may look like -
+ * rpsRxQueueCpuMapList = ffff ffff ffff ffff ffff
+ * (all 5 rx queues can be processed on all 16 cores)
+ * max string len = 24 + 1(for '\0'). Considering 30 to be on safe side.
+ *
+ * Supported Feature: Rx_thread
+ *
+ * Usage: Internal
+ * </ini>
+ */
+#define CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST \
+		CFG_INI_STRING( \
+		"rpsRxQueueCpuMapList", \
+		1, \
+		30, \
+		"e", \
+		"specify RPS map for different RX queus")
+
+/*
+ * <ini>
+ * gEnableTxOrphan- Enable/Disable orphaning of Tx packets
+ * @Default: false
+ *
+ * This ini is used to enable/disable orphaning of Tx packets.
+ *
+ * Related: None
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_DP_TX_ORPHAN_ENABLE \
+		CFG_INI_BOOL( \
+		"gEnableTxOrphan", \
+		false, \
+		"orphaning of Tx packets")
+
+/*
+ * <ini>
+ * rx_mode - Control to decide rx mode for packet processing
+ *
+ * @Min: 0
+ * @Max: (CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS | CFG_ENABLE_NAPI | \
+ *	 CFG_ENABLE_DYNAMIC_RPS)
+ *
+ * Some possible configurations:
+ * rx_mode=0 - Uses tasklets for bottom half
+ * CFG_ENABLE_NAPI (rx_mode=4) - Uses NAPI for bottom half
+ * CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI (rx_mode=5) - NAPI for bottom half,
+ * rx_thread for stack. Single threaded.
+ * CFG_ENABLE_DP_RX_THREAD | CFG_ENABLE_NAPI (rx_mode=10) - NAPI for bottom
+ * half, dp_rx_thread for stack processing. Supports multiple rx threads.
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_DP_RX_MODE \
+		CFG_INI_UINT("rx_mode", \
+		0, CFG_RX_MODE_MAX, CFG_RX_MODE_DEFAULT, \
+		CFG_VALUE_OR_DEFAULT, \
+		"Control to decide rx mode for packet procesing")
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+#define CFG_HDD_DP_LEGACY_TX_FLOW \
+	CFG(CFG_DP_LL_TX_FLOW_LWM) \
+	CFG(CFG_DP_LL_TX_FLOW_HWM_OFFSET) \
+	CFG(CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH) \
+	CFG(CFG_DP_LL_TX_LBW_FLOW_LWM) \
+	CFG(CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET) \
+	CFG(CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH) \
+	CFG(CFG_DP_LL_TX_HBW_FLOW_LWM) \
+	CFG(CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET) \
+	CFG(CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH)
+#else
+#define CFG_HDD_DP_LEGACY_TX_FLOW
+#endif
+
+#ifdef MSM_PLATFORM
+#define CFG_HDD_DP_MSM_PLATFORM \
+	CFG(CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD) \
+	CFG(CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD) \
+	CFG(CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD) \
+	CFG(CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL) \
+	CFG(CFG_DP_ENABLE_TCP_LIMIT_OUTPUT) \
+	CFG(CFG_DP_ENABLE_TCP_ADV_WIN_SCALE) \
+	CFG(CFG_DP_ENABLE_TCP_DELACK) \
+	CFG(CFG_DP_TCP_DELACK_THRESHOLD_HIGH) \
+	CFG(CFG_DP_TCP_DELACK_THRESHOLD_LOW) \
+	CFG(CFG_DP_TCP_DELACK_TIMER_COUNT) \
+	CFG(CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD)
+#else
+#define CFG_HDD_DP_MSM_PLATFORM
+#endif
+
+#define CFG_HDD_DP \
+	CFG(CFG_DP_NAPI_CE_CPU_MASK) \
+	CFG(CFG_DP_RX_THREAD_CPU_MASK) \
+	CFG(CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST) \
+	CFG(CFG_DP_TX_ORPHAN_ENABLE) \
+	CFG(CFG_DP_RX_MODE)
+#define CFG_HDD_DP_ALL \
+	CFG_HDD_DP \
+	CFG_HDD_DP_MSM_PLATFORM \
+	CFG_HDD_DP_LEGACY_TX_FLOW
+#endif

+ 32 - 611
core/hdd/inc/wlan_hdd_cfg.h

@@ -43,14 +43,11 @@
 
 struct hdd_context;
 
+#define CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST_LEN 30
+
 #define FW_MODULE_LOG_LEVEL_STRING_LENGTH  (512)
 #define TX_SCHED_WRR_PARAM_STRING_LENGTH   (50)
 #define TX_SCHED_WRR_PARAMS_NUM            (5)
-#define CFG_ENABLE_RX_THREAD		BIT(0)
-#define CFG_ENABLE_RPS			BIT(1)
-#define CFG_ENABLE_NAPI			BIT(2)
-#define CFG_ENABLE_DYNAMIC_RPS		BIT(3)
-#define CFG_ENABLE_DP_RX_THREADS	BIT(4)
 
 #ifdef DHCP_SERVER_OFFLOAD
 #define IPADDR_NUM_ENTRIES     (4)
@@ -4668,259 +4665,6 @@ enum hdd_link_speed_rpt_type {
 #define CFG_SET_TXPOWER_LIMIT5G_MAX                (30)
 #define CFG_SET_TXPOWER_LIMIT5G_DEFAULT            (30)
 
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-
-/*
- * <ini>
- * TxFlowLowWaterMark - Low watermark for pausing network queues
- *
- * @Min: 0
- * @Max: 1000
- * @Default: 300
- *
- * This ini specifies the low watermark of data packets transmitted
- * before pausing netif queues in tx flow path. It is only applicable
- * where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowHighWaterMarkOffset, TxFlowMaxQueueDepth,
- *          TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
- *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_FLOW_LWM                         "TxFlowLowWaterMark"
-#define CFG_LL_TX_FLOW_LWM_MIN                     (0)
-#define CFG_LL_TX_FLOW_LWM_MAX                     (1000)
-#define CFG_LL_TX_FLOW_LWM_DEFAULT                 (300)
-
-/*
- * <ini>
- * TxFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
- * @Min: 0
- * @Max: 300
- * @Default: 94
- *
- * This ini specifies the offset to upause the netif queues
- * when they are paused due to insufficient descriptors as guided by
- * ini TxFlowLowWaterMark. It is only applicable where legacy flow control
- * is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowMaxQueueDepth,
- *          TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
- *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_FLOW_HWM_OFFSET                  "TxFlowHighWaterMarkOffset"
-#define CFG_LL_TX_FLOW_HWM_OFFSET_MIN              (0)
-#define CFG_LL_TX_FLOW_HWM_OFFSET_MAX              (300)
-#define CFG_LL_TX_FLOW_HWM_OFFSET_DEFAULT          (94)
-
-/*
- * <ini>
- * TxFlowMaxQueueDepth - Max pause queue depth.
- *
- * @Min: 400
- * @Max: 3500
- * @Default: 1500
- *
- * This ini specifies the max queue pause depth.It is only applicable
- * where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
- *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_FLOW_MAX_Q_DEPTH                 "TxFlowMaxQueueDepth"
-#define CFG_LL_TX_FLOW_MAX_Q_DEPTH_MIN             (400)
-#define CFG_LL_TX_FLOW_MAX_Q_DEPTH_MAX             (3500)
-#define CFG_LL_TX_FLOW_MAX_Q_DEPTH_DEFAULT         (1500)
-
-/*
- * <ini>
- * TxLbwFlowLowWaterMark - Low watermark for pausing network queues
- *                         in low bandwidth band
- * @Min: 0
- * @Max: 1000
- * @Default: 450
- *
- * This ini specifies the low watermark of data packets transmitted
- * before pausing netif queues in tx flow path in low bandwidth band.
- * It is only applicable where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxFlowMaxQueueDepth, TxLbwFlowHighWaterMarkOffset,
- *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_LBW_FLOW_LWM                     "TxLbwFlowLowWaterMark"
-#define CFG_LL_TX_LBW_FLOW_LWM_MIN                 (0)
-#define CFG_LL_TX_LBW_FLOW_LWM_MAX                 (1000)
-#define CFG_LL_TX_LBW_FLOW_LWM_DEFAULT             (450)
-
-/*
- * <ini>
- * TxLbwFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
- *                                in low bandwidth band.
- * @Min: 0
- * @Max: 300
- * @Default: 50
- *
- * This ini specifies the offset to upause the netif queues
- * when they are paused due to insufficient descriptors as guided by
- * ini TxLbwFlowLowWaterMark in low bandwidth band. It is only applicable
- * where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
- *          TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_LBW_FLOW_HWM_OFFSET              "TxLbwFlowHighWaterMarkOffset"
-#define CFG_LL_TX_LBW_FLOW_HWM_OFFSET_MIN          (0)
-#define CFG_LL_TX_LBW_FLOW_HWM_OFFSET_MAX          (300)
-#define CFG_LL_TX_LBW_FLOW_HWM_OFFSET_DEFAULT      (50)
-
-/*
- * <ini>
- * TxLbwFlowMaxQueueDepth - Max pause queue depth in low bandwidth band
- *
- * @Min: 400
- * @Max: 3500
- * @Default: 750
- *
- * This ini specifies the max queue pause depth in low bandwidth band.
- * It is only applicable where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
- *          TxLbwFlowHighWaterMarkOffset, TxHbwFlowLowWaterMark,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH             "TxLbwFlowMaxQueueDepth"
-#define CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH_MIN         (400)
-#define CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH_MAX         (3500)
-#define CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH_DEFAULT     (750)
-
-/*
- * <ini>
- * TxHbwFlowLowWaterMark - Low watermark for pausing network queues
- *                         in high bandwidth band
- * @Min: 0
- * @Max: 1000
- * @Default: 406
- *
- * This ini specifies the threshold of data packets transmitted
- * before pausing netif queues.It is only applicable where
- * legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
- *          TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
- *          TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_HBW_FLOW_LWM                     "TxHbwFlowLowWaterMark"
-#define CFG_LL_TX_HBW_FLOW_LWM_MIN                 (0)
-#define CFG_LL_TX_HBW_FLOW_LWM_MAX                 (1000)
-#define CFG_LL_TX_HBW_FLOW_LWM_DEFAULT             (406)
-
-/*
- * <ini>
- * TxHbwFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
- *                                in high bandwidth band.
- * @Min: 0
- * @Max: 300
- * @Default: 94
- *
- * This ini specifies the offset to upause the netif queues
- * when they are paused due to insufficient descriptors as guided by
- * ini TxHbwFlowLowWaterMark in high bandwidth band. It is only applicable
- * where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
- *          TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
- *          TxHbwFlowLowWaterMark, TxHbwFlowMaxQueueDepth
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_HBW_FLOW_HWM_OFFSET              "TxHbwFlowHighWaterMarkOffset"
-#define CFG_LL_TX_HBW_FLOW_HWM_OFFSET_MIN          (0)
-#define CFG_LL_TX_HBW_FLOW_HWM_OFFSET_MAX          (300)
-#define CFG_LL_TX_HBW_FLOW_HWM_OFFSET_DEFAULT      (94)
-
-/*
- * <ini>
- * TxHbwFlowMaxQueueDepth - Max pause queue depth in high bandwidth band
- * @Min: 4000
- * @Max: 3500
- * @Default: 1500
- *
- * This ini specifies the max queue pause depth in high bandwidth band.
- * It is only applicable where legacy flow control is used i.e.for Rome.
- *
- * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
- *          TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
- *          TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
- *          TxHbwFlowLowWaterMark, TxHbwFlowHighWaterMarkOffset
- *
- * Supported Feature: Dynamic Flow Control
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH             "TxHbwFlowMaxQueueDepth"
-#define CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH_MIN         (400)
-#define CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH_MAX         (3500)
-#define CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH_DEFAULT     (1500)
-#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
-
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 
 /*
@@ -5100,190 +4844,6 @@ enum hdd_link_speed_rpt_type {
 #define CFG_ROAMING_DFS_CHANNEL_DEFAULT            (CFG_ROAMING_DFS_CHANNEL_DISABLED)
 
 #ifdef MSM_PLATFORM
-#define CFG_BUS_BANDWIDTH_HIGH_THRESHOLD           "gBusBandwidthHighThreshold"
-#define CFG_BUS_BANDWIDTH_HIGH_THRESHOLD_DEFAULT   (2000)
-#define CFG_BUS_BANDWIDTH_HIGH_THRESHOLD_MIN       (0)
-#define CFG_BUS_BANDWIDTH_HIGH_THRESHOLD_MAX       (4294967295UL)
-
-#define CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD         "gBusBandwidthMediumThreshold"
-#define CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD_DEFAULT (500)
-#define CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD_MIN     (0)
-#define CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD_MAX     (4294967295UL)
-
-#define CFG_BUS_BANDWIDTH_LOW_THRESHOLD            "gBusBandwidthLowThreshold"
-#define CFG_BUS_BANDWIDTH_LOW_THRESHOLD_DEFAULT    (150)
-#define CFG_BUS_BANDWIDTH_LOW_THRESHOLD_MIN        (0)
-#define CFG_BUS_BANDWIDTH_LOW_THRESHOLD_MAX        (4294967295UL)
-
-#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL         "gBusBandwidthComputeInterval"
-#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_DEFAULT (100)
-#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MIN     (0)
-#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MAX     (10000)
-
-/*
- * <ini>
- * gEnableTcpLimitOutput - Control to enable TCP limit output byte
- * @Min: 0
- * @Max: 1
- * @Default: 1
- *
- * This ini is used to enable dynamic configuration of TCP limit output bytes
- * tcp_limit_output_bytes param. Enabling this will let driver post message to
- * cnss-daemon, accordingly cnss-daemon will modify the tcp_limit_output_bytes.
- *
- * Supported Feature: Tcp limit output bytes
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_ENABLE_TCP_LIMIT_OUTPUT                      "gTcpLimitOutputEnable"
-#define CFG_ENABLE_TCP_LIMIT_OUTPUT_DEFAULT              (1)
-#define CFG_ENABLE_TCP_LIMIT_OUTPUT_MIN                  (0)
-#define CFG_ENABLE_TCP_LIMIT_OUTPUT_MAX                  (1)
-
-/*
- * <ini>
- * gTcpAdvWinScaleEnable - Control to enable  TCP adv window scaling
- * @Min: -0
- * @Max: 1
- * @Default: 1
- *
- * This ini is used to enable dynamic configuration of TCP adv window scaling system parameter.
- *
- * Supported Feature: Tcp Advance Window Scaling
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_ENABLE_TCP_ADV_WIN_SCALE                      "gTcpAdvWinScaleEnable"
-#define CFG_ENABLE_TCP_ADV_WIN_SCALE_DEFAULT              (1)
-#define CFG_ENABLE_TCP_ADV_WIN_SCALE_MIN                  (0)
-#define CFG_ENABLE_TCP_ADV_WIN_SCALE_MAX                  (1)
-
-/*
- * <ini>
- * gTcpDelAckEnable - Control to enable Dynamic Configuration of Tcp Delayed Ack
- * @Min: 0
- * @Max: 1
- * @Default: 1
- *
- * This ini is used to enable Dynamic Configuration of Tcp Delayed Ack
- *
- * Related: gTcpDelAckThresholdHigh, gTcpDelAckThresholdLow,
- *          gTcpDelAckTimerCount
- *
- * Supported Feature: Tcp Delayed Ack
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_ENABLE_TCP_DELACK                      "gTcpDelAckEnable"
-#define CFG_ENABLE_TCP_DELACK_DEFAULT              (1)
-#define CFG_ENABLE_TCP_DELACK_MIN                  (0)
-#define CFG_ENABLE_TCP_DELACK_MAX                  (1)
-
-
-/*
- * <ini>
- * gTcpDelAckThresholdHigh - High Threshold inorder to trigger TCP Del Ack
- *                                          indication
- * @Min: 0
- * @Max: 16000
- * @Default: 500
- *
- * This ini is used to mention the High Threshold inorder to trigger TCP Del Ack
- * indication i.e the threshold of packets received over a period of 100 ms.
- * i.e to have a low RX throughput requirement
- * Related: gTcpDelAckEnable, gTcpDelAckThresholdLow, gTcpDelAckTimerCount
- *
- * Supported Feature: Tcp Delayed Ack
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_TCP_DELACK_THRESHOLD_HIGH              "gTcpDelAckThresholdHigh"
-#define CFG_TCP_DELACK_THRESHOLD_HIGH_DEFAULT      (500)
-#define CFG_TCP_DELACK_THRESHOLD_HIGH_MIN          (0)
-#define CFG_TCP_DELACK_THRESHOLD_HIGH_MAX          (16000)
-
-/*
- * <ini>
- * gTcpDelAckThresholdLow - Low Threshold inorder to trigger TCP Del Ack
- *                                          indication
- * @Min: 0
- * @Max: 10000
- * @Default: 1000
- *
- * This ini is used to mention the Low Threshold inorder to trigger TCP Del Ack
- * indication i.e the threshold of packets received over a period of 100 ms.
- * i.e to have a low RX throughput requirement
- *
- * Related: gTcpDelAckEnable, gTcpDelAckThresholdHigh, gTcpDelAckTimerCount
- *
- * Supported Feature: Tcp Delayed Ack
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_TCP_DELACK_THRESHOLD_LOW               "gTcpDelAckThresholdLow"
-#define CFG_TCP_DELACK_THRESHOLD_LOW_DEFAULT       (1000)
-#define CFG_TCP_DELACK_THRESHOLD_LOW_MIN           (0)
-#define CFG_TCP_DELACK_THRESHOLD_LOW_MAX           (10000)
-
-/*
- * <ini>
- * gTcpDelAckTimerCount - Del Ack Timer Count  inorder to trigger TCP Del Ack
- *                                      indication
- * @Min: 1
- * @Max: 1000
- * @Default: 30
- *
- * This ini is used to mention the Del Ack Timer Count inorder to
- * trigger TCP Del Ack indication i.e number of 100 ms periods
- *
- * Related: gTcpDelAckEnable, gTcpDelAckThresholdHigh, gTcpDelAckThresholdLow
- *
- * Supported Feature: Tcp Delayed Ack
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_TCP_DELACK_TIMER_COUNT                 "gTcpDelAckTimerCount"
-#define CFG_TCP_DELACK_TIMER_COUNT_DEFAULT         (30)
-#define CFG_TCP_DELACK_TIMER_COUNT_MIN             (1)
-#define CFG_TCP_DELACK_TIMER_COUNT_MAX             (1000)
-
-
-/*
- * <ini>
- * gTcpTxHighTputThreshold - High Threshold inorder to trigger High
- *                                          Tx Throughput requirement.
- * @Min: 0
- * @Max: 16000
- * @Default: 500
- *
- * This ini specifies the threshold of packets transmitted
- * over a period of 100 ms beyond which TCP can be considered to have a high
- * TX throughput requirement. The driver uses this condition to tweak TCP TX
- * specific parameters (via cnss-daemon)
- *
- * Supported Feature: To tweak TCP TX n/w parameters
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_TCP_TX_HIGH_TPUT_THRESHOLD_NAME         "gTcpTxHighTputThreshold"
-#define CFG_TCP_TX_HIGH_TPUT_THRESHOLD_DEFAULT      (500)
-#define CFG_TCP_TX_HIGH_TPUT_THRESHOLD_MIN          (0)
-#define CFG_TCP_TX_HIGH_TPUT_THRESHOLD_MAX          (16000)
-
 /*
  * <ini>
  * periodic_stats_display_time - time(seconds) after which stats will be printed
@@ -6557,40 +6117,6 @@ enum hdd_link_speed_rpt_type {
 #define CFG_RESTART_BEACONING_ON_CH_AVOID_MAX     (CH_AVOID_RULE_RESTART_24G_ONLY)
 #define CFG_RESTART_BEACONING_ON_CH_AVOID_DEFAULT (CH_AVOID_RULE_RESTART)
 
-/*
- * <ini>
- * rx_mode - Control to decide rx mode for packet procesing
- *
- * @Min: 0
- * @Max: (CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS | CFG_ENABLE_NAPI | \
- *	 CFG_ENABLE_DYNAMIC_RPS)
- * Some possible configurations:
- * rx_mode=0 - Uses tasklets for bottom half
- * CFG_ENABLE_NAPI (rx_mode=4) - Uses NAPI for bottom half
- * CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI (rx_mode=5) - NAPI for bottom half,
- * rx_thread for stack. Single threaded.
- * CFG_ENABLE_DP_RX_THREAD | CFG_ENABLE_NAPI (rx_mode=10) - NAPI for bottom
- * half, dp_rx_thread for stack processing. Supports multiple rx threads.
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_RX_MODE_NAME     "rx_mode"
-#define CFG_RX_MODE_MIN      (0)
-#define CFG_RX_MODE_MAX      (CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS | \
-				 CFG_ENABLE_NAPI | CFG_ENABLE_DYNAMIC_RPS | \
-				 CFG_ENABLE_DP_RX_THREADS)
-#ifdef MDM_PLATFORM
-#define CFG_RX_MODE_DEFAULT  (0)
-#elif defined(HELIUMPLUS)
-#define CFG_RX_MODE_DEFAULT  CFG_ENABLE_NAPI
-#elif defined(QCA_WIFI_QCA6290_11AX)
-#define CFG_RX_MODE_DEFAULT  (CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI)
-#else
-#define CFG_RX_MODE_DEFAULT  (CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI)
-#endif
-
 /*
  * <ini>
  * num_dp_rx_threads - Control to set the number of dp rx threads
@@ -6650,89 +6176,6 @@ enum hdd_link_speed_rpt_type {
 #define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MAX      (32)
 #define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_DEFAULT  (32)
 
-/*
- * <ini>
- * NAPI_CPU_AFFINITY_MASK - CPU mask to affine NAPIs
- *
- * @Min: 0
- * @Max: 0xFF
- * @Default: 0
- *
- * This ini is used to set NAPI IRQ CPU affinity
- *
- * Supported Feature: NAPI
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_NAPI_CE_CPU_MASK_NAME	"NAPI_CPU_AFFINITY_MASK"
-#define CFG_NAPI_CE_CPU_MASK_MIN	(0)
-#define CFG_NAPI_CE_CPU_MASK_MAX	(0xFF)
-#define CFG_NAPI_CE_CPU_MASK_DEFAULT	(0)
-
-/*
- * <ini>
- * RX_THREAD_CPU_AFFINITY_MASK - CPU mask to affine Rx_thread
- *
- * @Min: 0
- * @Max: 0xFF
- * @Default: 0
- *
- * This ini is used to set Rx_thread CPU affinity
- *
- * Supported Feature: Rx_thread
- *
- * Usage: Internal
- *
- * </ini>
- */
-#define CFG_RX_THREAD_CPU_MASK_NAME	"RX_THREAD_CPU_AFFINITY_MASK"
-#define CFG_RX_THREAD_CPU_MASK_MIN	(0)
-#define CFG_RX_THREAD_CPU_MASK_MAX	(0xFF)
-#define CFG_RX_THREAD_CPU_MASK_DEFAULT	(0)
-
-/* List of RPS CPU maps for different rx queues registered by WLAN driver
- * Ref - Kernel/Documentation/networking/scaling.txt
- * RPS CPU map for a particular RX queue, selects CPU(s) for bottom half
- * processing of RX packets. For example, for a system with 4 CPUs,
- * 0xe: Use CPU1 - CPU3 and donot use CPU0.
- * 0x0: RPS is disabled, packets are processed on the interrupting CPU.
-.*
- * WLAN driver registers NUM_TX_QUEUES queues for tx and rx each during
- * alloc_netdev_mq. Hence, we need to have a cpu mask for each of the rx queues.
- *
- * For example, if the NUM_TX_QUEUES is 4, a sample WLAN ini entry may look like
- * rpsRxQueueCpuMapList=a b c d
- * For a 4 CPU system (CPU0 - CPU3), this implies:
- * 0xa - (1010) use CPU1, CPU3 for rx queue 0
- * 0xb - (1011) use CPU0, CPU1 and CPU3 for rx queue 1
- * 0xc - (1100) use CPU2, CPU3 for rx queue 2
- * 0xd - (1101) use CPU0, CPU2 and CPU3 for rx queue 3
-
- * In practice, we may want to avoid the cores which are heavily loaded.
- */
-
-/* Name of the ini file entry to specify RPS map for different RX queus */
-#define CFG_RPS_RX_QUEUE_CPU_MAP_LIST_NAME         "rpsRxQueueCpuMapList"
-
-/* Default value of rpsRxQueueCpuMapList. Different platforms may have
- * different configurations for NUM_TX_QUEUES and # of cpus, and will need to
- * configure an appropriate value via ini file. Setting default value to 'e' to
- * avoid use of CPU0 (since its heavily used by other system processes) by rx
- * queue 0, which is currently being used for rx packet processing.
- */
-#define CFG_RPS_RX_QUEUE_CPU_MAP_LIST_DEFAULT      "e"
-
-/* Maximum length of string used to hold a list of cpu maps for various rx
- * queues. Considering a 16 core system with 5 rx queues, a RPS CPU map
- * list may look like -
- * rpsRxQueueCpuMapList = ffff ffff ffff ffff ffff
- * (all 5 rx queues can be processed on all 16 cores)
- * max string len = 24 + 1(for '\0'). Considering 30 to be on safe side.
- */
-#define CFG_RPS_RX_QUEUE_CPU_MAP_LIST_LEN 30
-
 /*
  * Support to start sap in indoor channel
  * Customer can config this item to enable/disable sap in indoor channel
@@ -7515,26 +6958,6 @@ enum hdd_link_speed_rpt_type {
 #define CFG_MAWC_NLO_MAX_SCAN_INTERVAL_MAX      (0xFFFFFFFF)
 #define CFG_MAWC_NLO_MAX_SCAN_INTERVAL_DEFAULT  (60000)
 
-/*
- * <ini>
- * gEnableTxOrphan- Enable/Disable orphaning of Tx packets
- * @Min: 0
- * @Max: 1
- * @Default: 0
- *
- * This ini is used to enable/disable orphaning of Tx packets.
- *
- * Related: None
- *
- * Usage: External
- *
- * </ini>
- */
-#define CFG_TX_ORPHAN_ENABLE_NAME    "gEnableTxOrphan"
-#define CFG_TX_ORPHAN_ENABLE_DEFAULT (0)
-#define CFG_TX_ORPHAN_ENABLE_MIN     (0)
-#define CFG_TX_ORPHAN_ENABLE_MAX     (1)
-
 /*
  * <ini>
  * gItoRepeatCount - sets ito repeated count
@@ -8896,17 +8319,6 @@ struct hdd_config {
 	uint8_t gDisableDfsJapanW53;
 	bool gEnableOverLapCh;
 	bool fRegChangeDefCountry;
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-	uint32_t TxFlowLowWaterMark;
-	uint32_t TxFlowHighWaterMarkOffset;
-	uint32_t TxFlowMaxQueueDepth;
-	uint32_t TxLbwFlowLowWaterMark;
-	uint32_t TxLbwFlowHighWaterMarkOffset;
-	uint32_t TxLbwFlowMaxQueueDepth;
-	uint32_t TxHbwFlowLowWaterMark;
-	uint32_t TxHbwFlowHighWaterMarkOffset;
-	uint32_t TxHbwFlowMaxQueueDepth;
-#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 	uint32_t TxFlowStopQueueThreshold;
 	uint32_t TxFlowStartQueueOffset;
@@ -8915,21 +8327,6 @@ struct hdd_config {
 
 	uint8_t allowDFSChannelRoam;
 
-#ifdef MSM_PLATFORM
-	uint32_t busBandwidthHighThreshold;
-	uint32_t busBandwidthMediumThreshold;
-	uint32_t busBandwidthLowThreshold;
-	uint32_t busBandwidthComputeInterval;
-	uint32_t enable_tcp_delack;
-	bool     enable_tcp_limit_output;
-	uint32_t enable_tcp_adv_win_scale;
-	uint32_t tcpDelackThresholdHigh;
-	uint32_t tcpDelackThresholdLow;
-	uint32_t tcp_tx_high_tput_thres;
-	uint32_t tcp_delack_timer_count;
-	u8  periodic_stats_disp_time;
-#endif /* MSM_PLATFORM */
-
 	uint8_t enableFwModuleLogLevel[FW_MODULE_LOG_LEVEL_STRING_LENGTH];
 
 	uint8_t gMaxConcurrentActiveSessions;
@@ -8984,14 +8381,9 @@ struct hdd_config {
 	bool fastpath_enable;
 #endif
 	bool etsi13_srd_chan_in_master_mode;
-	uint8_t rx_mode;
 	uint8_t num_dp_rx_threads;
 	uint32_t ce_service_max_yield_time;
 	uint8_t ce_service_max_rx_ind_flush;
-	uint32_t napi_cpu_affinity_mask;
-	/* CPU affinity mask for rx_thread */
-	uint32_t rx_thread_affinity_mask;
-	uint8_t cpu_map_list[CFG_RPS_RX_QUEUE_CPU_MAP_LIST_LEN];
 	bool ce_classify_enabled;
 	uint32_t dual_mac_feature_disable;
 	uint8_t dbs_scan_selection[CFG_DBS_SCAN_PARAM_LENGTH];
@@ -9181,6 +8573,36 @@ struct hdd_config {
 #ifndef REMOVE_PKT_LOG
 	bool enable_packet_log;
 #endif
+	uint32_t rx_mode;
+#ifdef MSM_PLATFORM
+	uint32_t bus_bw_high_threshold;
+	uint32_t bus_bw_medium_threshold;
+	uint32_t bus_bw_low_threshold;
+	uint32_t bus_bw_compute_interval;
+	uint32_t enable_tcp_delack;
+	bool     enable_tcp_limit_output;
+	uint32_t enable_tcp_adv_win_scale;
+	uint32_t tcp_delack_thres_high;
+	uint32_t tcp_delack_thres_low;
+	uint32_t tcp_tx_high_tput_thres;
+	uint32_t tcp_delack_timer_count;
+	u8  periodic_stats_disp_time;
+#endif /* MSM_PLATFORM */
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+	uint32_t tx_flow_low_watermark;
+	uint32_t tx_flow_hi_watermark_offset;
+	uint32_t tx_flow_max_queue_depth;
+	uint32_t tx_lbw_flow_low_watermark;
+	uint32_t tx_lbw_flow_hi_watermark_offset;
+	uint32_t tx_lbw_flow_max_queue_depth;
+	uint32_t tx_hbw_flow_low_watermark;
+	uint32_t tx_hbw_flow_hi_watermark_offset;
+	uint32_t tx_hbw_flow_max_queue_depth;
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+	uint32_t napi_cpu_affinity_mask;
+	/* CPU affinity mask for rx_thread */
+	uint32_t rx_thread_affinity_mask;
+	uint8_t cpu_map_list[CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST_LEN];
 };
 
 #define VAR_OFFSET(_Struct, _Var) (offsetof(_Struct, _Var))
@@ -9348,5 +8770,4 @@ QDF_STATUS hdd_update_nss(struct hdd_adapter *adapter, uint8_t nss);
  * Return: true on success, else false
  */
 bool hdd_dfs_indicate_radar(struct hdd_context *hdd_ctx);
-
 #endif

+ 2 - 2
core/hdd/inc/wlan_hdd_main.h

@@ -1411,7 +1411,7 @@ struct hdd_adapter {
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_HL_NETDEV_FLOW_CONTROL */
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
 	unsigned int tx_flow_low_watermark;
-	unsigned int tx_flow_high_watermark_offset;
+	unsigned int tx_flow_hi_watermark_offset;
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 
 	bool offloads_configured;
@@ -2321,7 +2321,7 @@ int hdd_bus_bandwidth_init(struct hdd_context *hdd_ctx);
 void hdd_bus_bandwidth_deinit(struct hdd_context *hdd_ctx);
 
 #define GET_CUR_RX_LVL(config) ((config)->cur_rx_level)
-#define GET_BW_COMPUTE_INTV(config) ((config)->busBandwidthComputeInterval)
+#define GET_BW_COMPUTE_INTV(config) ((config)->bus_bw_compute_interval)
 
 #else
 

+ 10 - 0
core/hdd/inc/wlan_hdd_tx_rx.h

@@ -338,4 +338,14 @@ hdd_skb_nontso_linearize(struct sk_buff *skb)
 }
 #endif
 
+/**
+ * hdd_dp_cfg_update() - update hdd config for HDD DP INIs
+ * @psoc: Pointer to psoc obj
+ * @hdd_ctx: Pointer to hdd context
+ *
+ * Return: None
+ */
+void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
+		       struct hdd_context *hdd_ctx);
+
 #endif /* end #if !defined(WLAN_HDD_TX_RX_H) */

+ 2 - 212
core/hdd/src/wlan_hdd_cfg.c

@@ -45,6 +45,8 @@
 #include "wlan_mlme_ucfg_api.h"
 #include "wlan_mlme_public_struct.h"
 #include "wlan_fwol_ucfg_api.h"
+#include "cfg_ucfg_api.h"
+#include "hdd_dp_cfg.h"
 
 static void
 cb_notify_set_roam_prefer5_g_hz(struct hdd_context *hdd_ctx,
@@ -1998,68 +2000,6 @@ struct reg_table_entry g_registry_table[] = {
 		     CFG_REG_CHANGE_DEF_COUNTRY_DEFAULT,
 		     CFG_REG_CHANGE_DEF_COUNTRY_MIN,
 		     CFG_REG_CHANGE_DEF_COUNTRY_MAX),
-
-#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-	REG_VARIABLE(CFG_LL_TX_FLOW_LWM, WLAN_PARAM_Integer,
-		     struct hdd_config, TxFlowLowWaterMark,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_FLOW_LWM_DEFAULT,
-		     CFG_LL_TX_FLOW_LWM_MIN,
-		     CFG_LL_TX_FLOW_LWM_MAX),
-	REG_VARIABLE(CFG_LL_TX_FLOW_HWM_OFFSET, WLAN_PARAM_Integer,
-		     struct hdd_config, TxFlowHighWaterMarkOffset,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_FLOW_HWM_OFFSET_DEFAULT,
-		     CFG_LL_TX_FLOW_HWM_OFFSET_MIN,
-		     CFG_LL_TX_FLOW_HWM_OFFSET_MAX),
-	REG_VARIABLE(CFG_LL_TX_FLOW_MAX_Q_DEPTH, WLAN_PARAM_Integer,
-		     struct hdd_config, TxFlowMaxQueueDepth,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_FLOW_MAX_Q_DEPTH_DEFAULT,
-		     CFG_LL_TX_FLOW_MAX_Q_DEPTH_MIN,
-		     CFG_LL_TX_FLOW_MAX_Q_DEPTH_MAX),
-	REG_VARIABLE(CFG_LL_TX_LBW_FLOW_LWM, WLAN_PARAM_Integer,
-		     struct hdd_config, TxLbwFlowLowWaterMark,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_LBW_FLOW_LWM_DEFAULT,
-		     CFG_LL_TX_LBW_FLOW_LWM_MIN,
-		     CFG_LL_TX_LBW_FLOW_LWM_MAX),
-
-	REG_VARIABLE(CFG_LL_TX_LBW_FLOW_HWM_OFFSET, WLAN_PARAM_Integer,
-		     struct hdd_config, TxLbwFlowHighWaterMarkOffset,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_LBW_FLOW_HWM_OFFSET_DEFAULT,
-		     CFG_LL_TX_LBW_FLOW_HWM_OFFSET_MIN,
-		     CFG_LL_TX_LBW_FLOW_HWM_OFFSET_MAX),
-
-	REG_VARIABLE(CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH, WLAN_PARAM_Integer,
-		     struct hdd_config, TxLbwFlowMaxQueueDepth,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH_DEFAULT,
-		     CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH_MIN,
-		     CFG_LL_TX_LBW_FLOW_MAX_Q_DEPTH_MAX),
-
-	REG_VARIABLE(CFG_LL_TX_HBW_FLOW_LWM, WLAN_PARAM_Integer,
-		     struct hdd_config, TxHbwFlowLowWaterMark,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_HBW_FLOW_LWM_DEFAULT,
-		     CFG_LL_TX_HBW_FLOW_LWM_MIN,
-		     CFG_LL_TX_HBW_FLOW_LWM_MAX),
-
-	REG_VARIABLE(CFG_LL_TX_HBW_FLOW_HWM_OFFSET, WLAN_PARAM_Integer,
-		     struct hdd_config, TxHbwFlowHighWaterMarkOffset,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_HBW_FLOW_HWM_OFFSET_DEFAULT,
-		     CFG_LL_TX_HBW_FLOW_HWM_OFFSET_MIN,
-		     CFG_LL_TX_HBW_FLOW_HWM_OFFSET_MAX),
-
-	REG_VARIABLE(CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH, WLAN_PARAM_Integer,
-		     struct hdd_config, TxHbwFlowMaxQueueDepth,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH_DEFAULT,
-		     CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH_MIN,
-		     CFG_LL_TX_HBW_FLOW_MAX_Q_DEPTH_MAX),
-#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 
 	REG_VARIABLE(CFG_LL_TX_FLOW_STOP_QUEUE_TH, WLAN_PARAM_Integer,
@@ -2100,82 +2040,6 @@ struct reg_table_entry g_registry_table[] = {
 		     CFG_ADVERTISE_CONCURRENT_OPERATION_MAX),
 
 #ifdef MSM_PLATFORM
-	REG_VARIABLE(CFG_BUS_BANDWIDTH_HIGH_THRESHOLD, WLAN_PARAM_Integer,
-		     struct hdd_config, busBandwidthHighThreshold,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_BUS_BANDWIDTH_HIGH_THRESHOLD_DEFAULT,
-		     CFG_BUS_BANDWIDTH_HIGH_THRESHOLD_MIN,
-		     CFG_BUS_BANDWIDTH_HIGH_THRESHOLD_MAX),
-
-	REG_VARIABLE(CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD, WLAN_PARAM_Integer,
-		     struct hdd_config, busBandwidthMediumThreshold,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD_DEFAULT,
-		     CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD_MIN,
-		     CFG_BUS_BANDWIDTH_MEDIUM_THRESHOLD_MAX),
-
-	REG_VARIABLE(CFG_BUS_BANDWIDTH_LOW_THRESHOLD, WLAN_PARAM_Integer,
-		     struct hdd_config, busBandwidthLowThreshold,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_BUS_BANDWIDTH_LOW_THRESHOLD_DEFAULT,
-		     CFG_BUS_BANDWIDTH_LOW_THRESHOLD_MIN,
-		     CFG_BUS_BANDWIDTH_LOW_THRESHOLD_MAX),
-
-	REG_VARIABLE(CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL, WLAN_PARAM_Integer,
-		     struct hdd_config, busBandwidthComputeInterval,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_DEFAULT,
-		     CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MIN,
-		     CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MAX),
-
-	REG_VARIABLE(CFG_ENABLE_TCP_LIMIT_OUTPUT, WLAN_PARAM_Integer,
-		     struct hdd_config, enable_tcp_limit_output,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_ENABLE_TCP_LIMIT_OUTPUT_DEFAULT,
-		     CFG_ENABLE_TCP_LIMIT_OUTPUT_MIN,
-		     CFG_ENABLE_TCP_LIMIT_OUTPUT_MAX),
-
-	REG_VARIABLE(CFG_ENABLE_TCP_ADV_WIN_SCALE, WLAN_PARAM_Integer,
-		     struct hdd_config, enable_tcp_adv_win_scale,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_ENABLE_TCP_ADV_WIN_SCALE_DEFAULT,
-		     CFG_ENABLE_TCP_ADV_WIN_SCALE_MIN,
-		     CFG_ENABLE_TCP_ADV_WIN_SCALE_MAX),
-
-	REG_VARIABLE(CFG_ENABLE_TCP_DELACK, WLAN_PARAM_Integer,
-		     struct hdd_config, enable_tcp_delack,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_ENABLE_TCP_DELACK_DEFAULT,
-		     CFG_ENABLE_TCP_DELACK_MIN,
-		     CFG_ENABLE_TCP_DELACK_MAX),
-
-	REG_VARIABLE(CFG_TCP_DELACK_THRESHOLD_HIGH, WLAN_PARAM_Integer,
-		     struct hdd_config, tcpDelackThresholdHigh,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_TCP_DELACK_THRESHOLD_HIGH_DEFAULT,
-		     CFG_TCP_DELACK_THRESHOLD_HIGH_MIN,
-		     CFG_TCP_DELACK_THRESHOLD_HIGH_MAX),
-
-	REG_VARIABLE(CFG_TCP_DELACK_THRESHOLD_LOW, WLAN_PARAM_Integer,
-		     struct hdd_config, tcpDelackThresholdLow,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_TCP_DELACK_THRESHOLD_LOW_DEFAULT,
-		     CFG_TCP_DELACK_THRESHOLD_LOW_MIN,
-		     CFG_TCP_DELACK_THRESHOLD_LOW_MAX),
-
-	REG_VARIABLE(CFG_TCP_DELACK_TIMER_COUNT, WLAN_PARAM_Integer,
-		     struct hdd_config, tcp_delack_timer_count,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_TCP_DELACK_TIMER_COUNT_DEFAULT,
-		     CFG_TCP_DELACK_TIMER_COUNT_MIN,
-		     CFG_TCP_DELACK_TIMER_COUNT_MAX),
-
-	REG_VARIABLE(CFG_TCP_TX_HIGH_TPUT_THRESHOLD_NAME, WLAN_PARAM_Integer,
-		     struct hdd_config, tcp_tx_high_tput_thres,
-		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		     CFG_TCP_TX_HIGH_TPUT_THRESHOLD_DEFAULT,
-		     CFG_TCP_TX_HIGH_TPUT_THRESHOLD_MIN,
-		     CFG_TCP_TX_HIGH_TPUT_THRESHOLD_MAX),
 	REG_VARIABLE(CFG_PERIODIC_STATS_DISPLAY_TIME_NAME, WLAN_PARAM_Integer,
 		     struct hdd_config, periodic_stats_disp_time,
 		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -2786,13 +2650,6 @@ struct reg_table_entry g_registry_table[] = {
 		CFG_ADAPT_DWELL_WIFI_THRESH_MIN,
 		CFG_ADAPT_DWELL_WIFI_THRESH_MAX),
 
-	REG_VARIABLE(CFG_RX_MODE_NAME, WLAN_PARAM_Integer,
-		struct hdd_config, rx_mode,
-		VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		CFG_RX_MODE_DEFAULT,
-		CFG_RX_MODE_MIN,
-		CFG_RX_MODE_MAX),
-
 	REG_VARIABLE(CFG_NUM_DP_RX_THREADS_NAME, WLAN_PARAM_Integer,
 		     struct hdd_config, num_dp_rx_threads,
 		     VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -2814,26 +2671,6 @@ struct reg_table_entry g_registry_table[] = {
 		CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MIN,
 		CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MAX),
 
-	REG_VARIABLE(CFG_NAPI_CE_CPU_MASK_NAME, WLAN_PARAM_HexInteger,
-		struct hdd_config, napi_cpu_affinity_mask,
-		VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		CFG_NAPI_CE_CPU_MASK_DEFAULT,
-		CFG_NAPI_CE_CPU_MASK_MIN,
-		CFG_NAPI_CE_CPU_MASK_MAX),
-
-	REG_VARIABLE(CFG_RX_THREAD_CPU_MASK_NAME, WLAN_PARAM_HexInteger,
-		struct hdd_config, rx_thread_affinity_mask,
-		VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		CFG_RX_THREAD_CPU_MASK_DEFAULT,
-		CFG_RX_THREAD_CPU_MASK_MIN,
-		CFG_RX_THREAD_CPU_MASK_MAX),
-
-	REG_VARIABLE_STRING(CFG_RPS_RX_QUEUE_CPU_MAP_LIST_NAME,
-				 WLAN_PARAM_String,
-				 struct hdd_config, cpu_map_list,
-				 VAR_FLAGS_OPTIONAL,
-				 (void *)CFG_RPS_RX_QUEUE_CPU_MAP_LIST_DEFAULT),
-
 	REG_VARIABLE(CFG_INDOOR_CHANNEL_SUPPORT_NAME,
 		     WLAN_PARAM_Integer,
 		     struct hdd_config, indoor_channel_support,
@@ -3016,13 +2853,6 @@ struct reg_table_entry g_registry_table[] = {
 		CFG_FORCE_1X1_MIN,
 		CFG_FORCE_1X1_MAX),
 
-	REG_VARIABLE(CFG_TX_ORPHAN_ENABLE_NAME, WLAN_PARAM_Integer,
-		struct hdd_config, tx_orphan_enable,
-		VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
-		CFG_TX_ORPHAN_ENABLE_DEFAULT,
-		CFG_TX_ORPHAN_ENABLE_MIN,
-		CFG_TX_ORPHAN_ENABLE_MAX),
-
 	REG_VARIABLE(CFG_ITO_REPEAT_COUNT_NAME, WLAN_PARAM_Integer,
 		struct hdd_config, ito_repeat_count,
 		VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -4261,45 +4091,6 @@ static void hdd_override_all_ps(struct hdd_context *hdd_ctx)
 	ucfg_pmo_set_wow_enable(hdd_ctx->psoc, PMO_WOW_DISABLE_BOTH);
 }
 
-/**
- * hdd_set_rx_mode_value() - set rx_mode values
- * @hdd_ctx: hdd context
- *
- * Return: none
- */
-static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
-{
-	/* RPS has higher priority than dynamic RPS when both bits are set */
-	if (hdd_ctx->config->rx_mode & CFG_ENABLE_RPS &&
-	    hdd_ctx->config->rx_mode & CFG_ENABLE_DYNAMIC_RPS)
-		hdd_ctx->config->rx_mode &= ~CFG_ENABLE_DYNAMIC_RPS;
-
-	if (hdd_ctx->config->rx_mode & CFG_ENABLE_RX_THREAD &&
-		 hdd_ctx->config->rx_mode & CFG_ENABLE_RPS) {
-		hdd_warn("rx_mode wrong configuration. Make it default");
-		hdd_ctx->config->rx_mode = CFG_RX_MODE_DEFAULT;
-	}
-
-	if (hdd_ctx->config->rx_mode & CFG_ENABLE_RX_THREAD)
-		hdd_ctx->enable_rxthread = true;
-	else if (hdd_ctx->config->rx_mode & CFG_ENABLE_DP_RX_THREADS)
-		hdd_ctx->enable_dp_rx_threads = true;
-
-	if (hdd_ctx->config->rx_mode & CFG_ENABLE_RPS)
-		hdd_ctx->rps = true;
-
-	if (hdd_ctx->config->rx_mode & CFG_ENABLE_NAPI)
-		hdd_ctx->napi_enable = true;
-
-	if (hdd_ctx->config->rx_mode & CFG_ENABLE_DYNAMIC_RPS)
-		hdd_ctx->dynamic_rps = true;
-
-	hdd_info("rx_mode:%u dp_rx_threads:%u rx_thread:%u napi:%u rps:%u dynamic rps %u",
-		 hdd_ctx->config->rx_mode, hdd_ctx->enable_dp_rx_threads,
-		 hdd_ctx->enable_rxthread, hdd_ctx->napi_enable,
-		 hdd_ctx->rps, hdd_ctx->dynamic_rps);
-}
-
 /**
  * hdd_parse_config_ini() - parse the ini configuration file
  * @hdd_ctx: the pointer to hdd context
@@ -4410,7 +4201,6 @@ QDF_STATUS hdd_parse_config_ini(struct hdd_context *hdd_ctx)
 
 	/* Loop through the registry table and apply all these configs */
 	qdf_status = hdd_apply_cfg_ini(hdd_ctx, cfg_ini_table, i);
-	hdd_set_rx_mode_value(hdd_ctx);
 	if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
 		hdd_override_all_ps(hdd_ctx);
 

+ 29 - 28
core/hdd/src/wlan_hdd_ipa.c

@@ -148,25 +148,26 @@ void hdd_ipa_set_tx_flow_info(void)
 				if (ucfg_ipa_uc_is_enabled() &&
 				    (QDF_SAP_MODE == adapter->device_mode)) {
 					adapter->tx_flow_low_watermark =
-					hdd_ctx->config->TxFlowLowWaterMark +
+					hdd_ctx->config->tx_flow_low_watermark +
 					WLAN_TFC_IPAUC_TX_DESC_RESERVE;
 				} else {
 					adapter->tx_flow_low_watermark =
 						hdd_ctx->config->
-							TxFlowLowWaterMark;
+							tx_flow_low_watermark;
 				}
-				adapter->tx_flow_high_watermark_offset =
-				   hdd_ctx->config->TxFlowHighWaterMarkOffset;
+				adapter->tx_flow_hi_watermark_offset =
+				   hdd_ctx->config->tx_flow_hi_watermark_offset;
 				cdp_fc_ll_set_tx_pause_q_depth(soc,
-					adapter->session_id,
-					hdd_ctx->config->TxFlowMaxQueueDepth);
+						adapter->session_id,
+						hdd_ctx->config->
+						tx_flow_max_queue_depth);
 				hdd_info("MODE %d,CH %d,LWM %d,HWM %d,TXQDEP %d",
 				    adapter->device_mode,
 				    targetChannel,
 				    adapter->tx_flow_low_watermark,
 				    adapter->tx_flow_low_watermark +
-				    adapter->tx_flow_high_watermark_offset,
-				    hdd_ctx->config->TxFlowMaxQueueDepth);
+				    adapter->tx_flow_hi_watermark_offset,
+				    hdd_ctx->config->tx_flow_max_queue_depth);
 				preAdapterChannel = targetChannel;
 				preAdapterContext = adapter;
 			} else {
@@ -181,11 +182,11 @@ void hdd_ipa_set_tx_flow_info(void)
 					/* Current adapter */
 					adapter->tx_flow_low_watermark = 0;
 					adapter->
-					tx_flow_high_watermark_offset = 0;
+					tx_flow_hi_watermark_offset = 0;
 					cdp_fc_ll_set_tx_pause_q_depth(soc,
 						adapter->session_id,
 						hdd_ctx->config->
-						TxHbwFlowMaxQueueDepth);
+						tx_hbw_flow_max_queue_depth);
 					hdd_info("SCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
 					       hdd_device_mode_to_string(
 							adapter->device_mode),
@@ -194,9 +195,9 @@ void hdd_ipa_set_tx_flow_info(void)
 					       adapter->tx_flow_low_watermark,
 					       adapter->tx_flow_low_watermark +
 					       adapter->
-					       tx_flow_high_watermark_offset,
+					       tx_flow_hi_watermark_offset,
 					       hdd_ctx->config->
-					       TxHbwFlowMaxQueueDepth);
+					       tx_hbw_flow_max_queue_depth);
 
 					if (!preAdapterContext) {
 						hdd_err("SCC: Previous adapter context NULL");
@@ -207,11 +208,11 @@ void hdd_ipa_set_tx_flow_info(void)
 					preAdapterContext->
 					tx_flow_low_watermark = 0;
 					preAdapterContext->
-					tx_flow_high_watermark_offset = 0;
+					tx_flow_hi_watermark_offset = 0;
 					cdp_fc_ll_set_tx_pause_q_depth(soc,
 						preAdapterContext->session_id,
 						hdd_ctx->config->
-						TxHbwFlowMaxQueueDepth);
+						tx_hbw_flow_max_queue_depth);
 					hdd_info("SCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
 					       hdd_device_mode_to_string(
 						preAdapterContext->device_mode
@@ -223,9 +224,9 @@ void hdd_ipa_set_tx_flow_info(void)
 					       preAdapterContext->
 					       tx_flow_low_watermark +
 					       preAdapterContext->
-					       tx_flow_high_watermark_offset,
+					       tx_flow_hi_watermark_offset,
 					       hdd_ctx->config->
-					       TxHbwFlowMaxQueueDepth);
+					       tx_hbw_flow_max_queue_depth);
 				}
 				/*
 				 * MCC, each adapter will have dedicated
@@ -253,15 +254,15 @@ void hdd_ipa_set_tx_flow_info(void)
 					}
 					adapter5->tx_flow_low_watermark =
 						hdd_ctx->config->
-						TxHbwFlowLowWaterMark;
+						tx_hbw_flow_low_watermark;
 					adapter5->
-					tx_flow_high_watermark_offset =
+					tx_flow_hi_watermark_offset =
 						hdd_ctx->config->
-						TxHbwFlowHighWaterMarkOffset;
+						tx_hbw_flow_hi_watermark_offset;
 					cdp_fc_ll_set_tx_pause_q_depth(soc,
 						adapter5->session_id,
 						hdd_ctx->config->
-						TxHbwFlowMaxQueueDepth);
+						tx_hbw_flow_max_queue_depth);
 					hdd_info("MCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
 					    hdd_device_mode_to_string(
 						    adapter5->device_mode),
@@ -271,9 +272,9 @@ void hdd_ipa_set_tx_flow_info(void)
 					    adapter5->
 					    tx_flow_low_watermark +
 					    adapter5->
-					    tx_flow_high_watermark_offset,
+					    tx_flow_hi_watermark_offset,
 					    hdd_ctx->config->
-					    TxHbwFlowMaxQueueDepth);
+					    tx_hbw_flow_max_queue_depth);
 
 					if (!adapter2_4) {
 						hdd_err("MCC: 2.4GHz adapter context NULL");
@@ -281,15 +282,15 @@ void hdd_ipa_set_tx_flow_info(void)
 					}
 					adapter2_4->tx_flow_low_watermark =
 						hdd_ctx->config->
-						TxLbwFlowLowWaterMark;
+						tx_lbw_flow_low_watermark;
 					adapter2_4->
-					tx_flow_high_watermark_offset =
+					tx_flow_hi_watermark_offset =
 						hdd_ctx->config->
-						TxLbwFlowHighWaterMarkOffset;
+						tx_lbw_flow_hi_watermark_offset;
 					cdp_fc_ll_set_tx_pause_q_depth(soc,
 						adapter2_4->session_id,
 						hdd_ctx->config->
-						TxLbwFlowMaxQueueDepth);
+						tx_lbw_flow_max_queue_depth);
 					hdd_info("MCC: MODE %s(%d), CH %d, LWM %d, HWM %d, TXQDEP %d",
 						hdd_device_mode_to_string(
 						    adapter2_4->device_mode),
@@ -300,9 +301,9 @@ void hdd_ipa_set_tx_flow_info(void)
 						adapter2_4->
 						tx_flow_low_watermark +
 						adapter2_4->
-						tx_flow_high_watermark_offset,
+						tx_flow_hi_watermark_offset,
 						hdd_ctx->config->
-						TxLbwFlowMaxQueueDepth);
+						tx_lbw_flow_max_queue_depth);
 
 				}
 			}

+ 19 - 18
core/hdd/src/wlan_hdd_main.c

@@ -2691,9 +2691,9 @@ static void hdd_update_ipa_component_config(struct hdd_context *hdd_ctx)
 	ipa_cfg.ipa_config = cfg->IpaConfig;
 	ipa_cfg.desc_size = cfg->IpaDescSize;
 	ipa_cfg.txbuf_count = cfg->IpaUcTxBufCount;
-	ipa_cfg.bus_bw_high = cfg->busBandwidthHighThreshold;
-	ipa_cfg.bus_bw_medium = cfg->busBandwidthMediumThreshold;
-	ipa_cfg.bus_bw_low = cfg->busBandwidthLowThreshold;
+	ipa_cfg.bus_bw_high = cfg->bus_bw_high_threshold;
+	ipa_cfg.bus_bw_medium = cfg->bus_bw_medium_threshold;
+	ipa_cfg.bus_bw_low = cfg->bus_bw_low_threshold;
 	ipa_cfg.ipa_bw_high = cfg->IpaHighBandwidthMbps;
 	ipa_cfg.ipa_bw_medium = cfg->IpaMediumBandwidthMbps;
 	ipa_cfg.ipa_bw_low = cfg->IpaLowBandwidthMbps;
@@ -7494,7 +7494,7 @@ static void hdd_display_periodic_stats(struct hdd_context *hdd_ctx,
 	if (data_in_interval)
 		data_in_time_period = data_in_interval;
 
-	if (counter * hdd_ctx->config->busBandwidthComputeInterval >=
+	if (counter * hdd_ctx->config->bus_bw_compute_interval >=
 		hdd_ctx->config->periodic_stats_disp_time * 1000) {
 		if (data_in_time_period) {
 			wlan_hdd_display_txrx_stats(hdd_ctx);
@@ -7559,11 +7559,11 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
 	bool rxthread_high_tput_req = false;
 	bool dptrace_high_tput_req;
 
-	if (total_pkts > hdd_ctx->config->busBandwidthHighThreshold)
+	if (total_pkts > hdd_ctx->config->bus_bw_high_threshold)
 		next_vote_level = PLD_BUS_WIDTH_HIGH;
-	else if (total_pkts > hdd_ctx->config->busBandwidthMediumThreshold)
+	else if (total_pkts > hdd_ctx->config->bus_bw_medium_threshold)
 		next_vote_level = PLD_BUS_WIDTH_MEDIUM;
-	else if (total_pkts > hdd_ctx->config->busBandwidthLowThreshold)
+	else if (total_pkts > hdd_ctx->config->bus_bw_low_threshold)
 		next_vote_level = PLD_BUS_WIDTH_LOW;
 	else
 		next_vote_level = PLD_BUS_WIDTH_NONE;
@@ -7599,7 +7599,7 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
 							 tx_packets,
 							 rx_packets);
 
-		if (rx_packets < hdd_ctx->config->busBandwidthLowThreshold)
+		if (rx_packets < hdd_ctx->config->bus_bw_low_threshold)
 			hdd_disable_rx_ol_for_low_tput(hdd_ctx, true);
 		else
 			hdd_disable_rx_ol_for_low_tput(hdd_ctx, false);
@@ -7631,7 +7631,7 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
 	 * 3)For UDP cases
 	 */
 	if (avg_no_rx_offload_pkts >
-			hdd_ctx->config->busBandwidthHighThreshold)
+			hdd_ctx->config->bus_bw_high_threshold)
 		rxthread_high_tput_req = true;
 	else
 		rxthread_high_tput_req = false;
@@ -7641,7 +7641,7 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
 			 rxthread_high_tput_req);
 
 	/* fine-tuning parameters for RX Flows */
-	if (avg_rx > hdd_ctx->config->tcpDelackThresholdHigh) {
+	if (avg_rx > hdd_ctx->config->tcp_delack_thres_high) {
 		if ((hdd_ctx->cur_rx_level != WLAN_SVC_TP_HIGH) &&
 		   (++hdd_ctx->rx_high_ind_cnt == delack_timer_cnt)) {
 			next_rx_level = WLAN_SVC_TP_HIGH;
@@ -7822,7 +7822,7 @@ restart_timer:
 	qdf_spinlock_acquire(&hdd_ctx->bus_bw_timer_lock);
 	if (hdd_ctx->bus_bw_timer_running)
 		qdf_timer_mod(&hdd_ctx->bus_bw_timer,
-				hdd_ctx->config->busBandwidthComputeInterval);
+			      hdd_ctx->config->bus_bw_compute_interval);
 	qdf_spinlock_release(&hdd_ctx->bus_bw_timer_lock);
 }
 
@@ -7956,16 +7956,16 @@ void wlan_hdd_display_tx_rx_histogram(struct hdd_context *hdd_ctx)
 
 #ifdef MSM_PLATFORM
 	hdd_nofl_info("BW compute Interval: %dms",
-		      hdd_ctx->config->busBandwidthComputeInterval);
+		      hdd_ctx->config->bus_bw_compute_interval);
 	hdd_nofl_info("BW High TH: %d BW Med TH: %d BW Low TH: %d",
-		      hdd_ctx->config->busBandwidthHighThreshold,
-		      hdd_ctx->config->busBandwidthMediumThreshold,
-		      hdd_ctx->config->busBandwidthLowThreshold);
+		      hdd_ctx->config->bus_bw_high_threshold,
+		      hdd_ctx->config->bus_bw_medium_threshold,
+		      hdd_ctx->config->bus_bw_low_threshold);
 	hdd_nofl_info("Enable TCP DEL ACK: %d",
 		      hdd_ctx->en_tcp_delack_no_lro);
 	hdd_nofl_info("TCP DEL High TH: %d TCP DEL Low TH: %d",
-		      hdd_ctx->config->tcpDelackThresholdHigh,
-		      hdd_ctx->config->tcpDelackThresholdLow);
+		      hdd_ctx->config->tcp_delack_thres_high,
+		      hdd_ctx->config->tcp_delack_thres_low);
 	hdd_nofl_info("TCP TX HIGH TP TH: %d (Use to set tcp_output_bytes_limit)",
 		      hdd_ctx->config->tcp_tx_high_tput_thres);
 #endif
@@ -9210,6 +9210,7 @@ static void hdd_cfg_params_init(struct hdd_context *hdd_ctx)
 	hdd_init_wlan_auto_shutdown(config, psoc);
 	hdd_init_wlan_logging_params(config, psoc);
 	hdd_init_packet_log(config, psoc);
+	hdd_dp_cfg_update(psoc, hdd_ctx);
 }
 
 /**
@@ -12266,7 +12267,7 @@ static void __hdd_bus_bw_compute_timer_start(struct hdd_context *hdd_ctx)
 	qdf_spinlock_acquire(&hdd_ctx->bus_bw_timer_lock);
 	hdd_ctx->bus_bw_timer_running = true;
 	qdf_timer_start(&hdd_ctx->bus_bw_timer,
-			hdd_ctx->config->busBandwidthComputeInterval);
+			hdd_ctx->config->bus_bw_compute_interval);
 	qdf_spinlock_release(&hdd_ctx->bus_bw_timer_lock);
 }
 

+ 1 - 1
core/hdd/src/wlan_hdd_napi.c

@@ -385,7 +385,7 @@ int hdd_napi_apply_throughput_policy(struct hdd_context *hddctx,
 		return rc;
 	}
 
-	if (packets > hddctx->config->busBandwidthHighThreshold)
+	if (packets > hddctx->config->bus_bw_high_threshold)
 		req_state = QCA_NAPI_TPUT_HI;
 	else
 		req_state = QCA_NAPI_TPUT_LO;

+ 159 - 2
core/hdd/src/wlan_hdd_tx_rx.c

@@ -60,6 +60,7 @@
 
 #include "wlan_hdd_nud_tracking.h"
 #include "dp_txrx.h"
+#include "cfg_ucfg_api.h"
 
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 /*
@@ -363,10 +364,10 @@ void hdd_get_tx_resource(struct hdd_adapter *adapter,
 	if (false ==
 	    cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC), STAId,
 				   adapter->tx_flow_low_watermark,
-				   adapter->tx_flow_high_watermark_offset)) {
+				   adapter->tx_flow_hi_watermark_offset)) {
 		hdd_debug("Disabling queues lwm %d hwm offset %d",
 			 adapter->tx_flow_low_watermark,
-			 adapter->tx_flow_high_watermark_offset);
+			 adapter->tx_flow_hi_watermark_offset);
 		wlan_hdd_netif_queue_control(adapter, WLAN_STOP_ALL_NETIF_QUEUE,
 					     WLAN_DATA_FLOW_CONTROL);
 		if ((adapter->tx_flow_timer_initialized == true) &&
@@ -2596,3 +2597,159 @@ void hdd_reset_tcp_delack(struct hdd_context *hdd_ctx)
 				    &rx_tp_data, sizeof(rx_tp_data));
 }
 #endif /* MSM_PLATFORM */
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+/**
+ * hdd_ini_tx_flow_control() - Initialize INIs concerned about tx flow control
+ * @config: pointer to hdd config
+ * @psoc: pointer to psoc obj
+ *
+ * Return: none
+ */
+static void hdd_ini_tx_flow_control(struct hdd_config *config,
+				    struct wlan_objmgr_psoc *psoc)
+{
+	config->tx_flow_low_watermark =
+		cfg_get(psoc, CFG_DP_LL_TX_FLOW_LWM);
+	config->tx_flow_hi_watermark_offset =
+		cfg_get(psoc, CFG_DP_LL_TX_FLOW_HWM_OFFSET);
+	config->tx_flow_max_queue_depth =
+		cfg_get(psoc, CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH);
+	config->tx_lbw_flow_low_watermark =
+		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_LWM);
+	config->tx_lbw_flow_hi_watermark_offset =
+		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET);
+	config->tx_lbw_flow_max_queue_depth =
+		cfg_get(psoc, CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH);
+	config->tx_hbw_flow_low_watermark =
+		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_LWM);
+	config->tx_hbw_flow_hi_watermark_offset =
+		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET);
+	config->tx_hbw_flow_max_queue_depth =
+		cfg_get(psoc, CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH);
+}
+#else
+static void hdd_ini_tx_flow_control(struct hdd_config *config,
+				    struct wlan_objmgr_psoc *psoc)
+{
+}
+#endif
+
+#ifdef MSM_PLATFORM
+/**
+ * hdd_ini_bus_bandwidth() - Initialize INIs concerned about bus bandwidth
+ * @config: pointer to hdd config
+ * @psoc: pointer to psoc obj
+ *
+ * Return: none
+ */
+static void hdd_ini_bus_bandwidth(struct hdd_config *config,
+				  struct wlan_objmgr_psoc *psoc)
+{
+	config->bus_bw_high_threshold =
+		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD);
+	config->bus_bw_medium_threshold =
+		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD);
+	config->bus_bw_low_threshold =
+		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD);
+	config->bus_bw_compute_interval =
+		cfg_get(psoc, CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL);
+}
+
+/**
+ * hdd_ini_tcp_settings() - Initialize INIs concerned about tcp settings
+ * @config: pointer to hdd config
+ * @psoc: pointer to psoc obj
+ *
+ * Return: none
+ */
+static void hdd_ini_tcp_settings(struct hdd_config *config,
+				 struct wlan_objmgr_psoc *psoc)
+{
+	config->enable_tcp_limit_output =
+		cfg_get(psoc, CFG_DP_ENABLE_TCP_LIMIT_OUTPUT);
+	config->enable_tcp_adv_win_scale =
+		cfg_get(psoc, CFG_DP_ENABLE_TCP_ADV_WIN_SCALE);
+	config->enable_tcp_delack =
+		cfg_get(psoc, CFG_DP_ENABLE_TCP_DELACK);
+	config->tcp_delack_thres_high =
+		cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_HIGH);
+	config->tcp_delack_thres_low =
+		cfg_get(psoc, CFG_DP_TCP_DELACK_THRESHOLD_LOW);
+	config->tcp_delack_timer_count =
+		cfg_get(psoc, CFG_DP_TCP_DELACK_TIMER_COUNT);
+	config->tcp_tx_high_tput_thres =
+		cfg_get(psoc, CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD);
+}
+#else
+static void hdd_ini_bus_bandwidth(struct hdd_config *config,
+				  struct wlan_objmgr_psoc *psoc)
+{
+}
+
+static void hdd_ini_tcp_settings(struct hdd_config *config,
+				 struct wlan_objmgr_psoc *psoc)
+{
+}
+#endif
+
+/**
+ * hdd_set_rx_mode_value() - set rx_mode values
+ * @hdd_ctx: hdd context
+ *
+ * Return: none
+ */
+static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
+{
+	uint32_t rx_mode = hdd_ctx->config->rx_mode;
+
+	/* RPS has higher priority than dynamic RPS when both bits are set */
+	if (rx_mode & CFG_ENABLE_RPS && rx_mode & CFG_ENABLE_DYNAMIC_RPS)
+		rx_mode &= ~CFG_ENABLE_DYNAMIC_RPS;
+
+	if (rx_mode & CFG_ENABLE_RX_THREAD && rx_mode & CFG_ENABLE_RPS) {
+		hdd_warn("rx_mode wrong configuration. Make it default");
+		rx_mode = CFG_RX_MODE_DEFAULT;
+	}
+
+	if (rx_mode & CFG_ENABLE_RX_THREAD)
+		hdd_ctx->enable_rxthread = true;
+	else if (rx_mode & CFG_ENABLE_DP_RX_THREADS)
+		hdd_ctx->enable_dp_rx_threads = true;
+
+	if (rx_mode & CFG_ENABLE_RPS)
+		hdd_ctx->rps = true;
+
+	if (rx_mode & CFG_ENABLE_NAPI)
+		hdd_ctx->napi_enable = true;
+
+	if (rx_mode & CFG_ENABLE_DYNAMIC_RPS)
+		hdd_ctx->dynamic_rps = true;
+
+	hdd_debug("rx_mode:%u dp_rx_threads:%u rx_thread:%u napi:%u rps:%u dynamic rps %u",
+		  rx_mode, hdd_ctx->enable_dp_rx_threads,
+		  hdd_ctx->enable_rxthread, hdd_ctx->napi_enable,
+		  hdd_ctx->rps, hdd_ctx->dynamic_rps);
+}
+
+void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
+		       struct hdd_context *hdd_ctx)
+{
+	struct hdd_config *config;
+	qdf_size_t cpu_map_list_len;
+
+	config = hdd_ctx->config;
+	hdd_ini_tx_flow_control(config, psoc);
+	hdd_ini_bus_bandwidth(config, psoc);
+	hdd_ini_tcp_settings(config, psoc);
+	config->napi_cpu_affinity_mask =
+		cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
+	config->rx_thread_affinity_mask =
+		cfg_get(psoc, CFG_DP_RX_THREAD_CPU_MASK);
+	qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST),
+			      config->cpu_map_list,
+			      sizeof(config->cpu_map_list), &cpu_map_list_len);
+	config->tx_orphan_enable = cfg_get(psoc, CFG_DP_TX_ORPHAN_ENABLE);
+	config->rx_mode = cfg_get(psoc, CFG_DP_RX_MODE);
+	hdd_set_rx_mode_value(hdd_ctx);
+}