
qcacld-3.0: Add INI option to set NAPI CPU affinity

Current NAPI IRQ affinity logic does not provide a way to specify
a configurable CPU affinity. Instead, it affines NAPI IRQs starting
from the biggest available core.
Profiling experiments suggest that NAPI IRQ affinity is not
required, so set the default to no affinity.

Change-Id: I0bea3389a7565f8ec157d4587a442b5e11c33fb2
CRs-Fixed: 2185186
Manjunathappa Prakash, 6 years ago
parent commit cb6df76414
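
As a usage sketch (the INI file name and location are platform specific and assumed here, not part of this change): adding

    NAPI_CPU_AFFINITY_MASK=0x0F

to the WLAN driver INI configuration would request that NAPI IRQs be affined to CPUs 0-3, with bit N of the mask presumably selecting CPU N. Leaving the option at its default of 0 keeps the new behaviour of not affining NAPI IRQs at all.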

+ 22 - 0
core/hdd/inc/wlan_hdd_cfg.h

@@ -9950,6 +9950,27 @@ enum dot11p_mode {
 #define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MAX      (32)
 #define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_DEFAULT  (32)
 
+/*
+ * <ini>
+ * NAPI_CPU_AFFINITY_MASK - CPU mask to affine NAPIs
+ *
+ * @Min: 0
+ * @Max: 0xFF
+ * @Default: 0
+ *
+ * This ini is used to set the CPU affinity mask for NAPI IRQs.
+ * A value of 0 (the default) leaves NAPI IRQ affinity unconfigured.
+ *
+ * Supported Feature: NAPI
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_NAPI_CE_CPU_MASK_NAME	"NAPI_CPU_AFFINITY_MASK"
+#define CFG_NAPI_CE_CPU_MASK_MIN	(0)
+#define CFG_NAPI_CE_CPU_MASK_MAX	(0xFF)
+#define CFG_NAPI_CE_CPU_MASK_DEFAULT	(0)
+
 
 /* List of RPS CPU maps for different rx queues registered by WLAN driver
  * Ref - Kernel/Documentation/networking/scaling.txt
@@ -14807,6 +14828,7 @@ struct hdd_config {
 	uint8_t rx_mode;
 	uint32_t ce_service_max_yield_time;
 	uint8_t ce_service_max_rx_ind_flush;
+	uint32_t napi_cpu_affinity_mask;
 	uint8_t cpu_map_list[CFG_RPS_RX_QUEUE_CPU_MAP_LIST_LEN];
 #ifdef FEATURE_WLAN_EXTSCAN
 	bool     extscan_enabled;

+ 7 - 0
core/hdd/src/wlan_hdd_cfg.c

@@ -4148,6 +4148,13 @@ struct reg_table_entry g_registry_table[] = {
 		CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MIN,
 		CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MAX),
 
+	REG_VARIABLE(CFG_NAPI_CE_CPU_MASK_NAME, WLAN_PARAM_HexInteger,
+		struct hdd_config, napi_cpu_affinity_mask,
+		VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+		CFG_NAPI_CE_CPU_MASK_DEFAULT,
+		CFG_NAPI_CE_CPU_MASK_MIN,
+		CFG_NAPI_CE_CPU_MASK_MAX),
+
 	REG_VARIABLE_STRING(CFG_RPS_RX_QUEUE_CPU_MAP_LIST_NAME,
 				 WLAN_PARAM_String,
 				 struct hdd_config, cpu_map_list,
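
The registry entry above uses WLAN_PARAM_HexInteger together with VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT, i.e. the INI value is read as a hex integer and replaced by the default when it falls outside the min/max range. A minimal stand-alone sketch of that idea (not the driver's actual parser; names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a hex INI value, falling back to the default when the value
     * lies outside [min, max] -- the "range check, assume default" idea. */
    static uint32_t parse_hex_ini(const char *value, uint32_t min,
                                  uint32_t max, uint32_t def)
    {
            uint32_t v = (uint32_t)strtoul(value, NULL, 16);

            return (v < min || v > max) ? def : v;
    }

    int main(void)
    {
            /* "NAPI_CPU_AFFINITY_MASK=0x0F" -> 0x0F (within 0..0xFF) */
            printf("mask = 0x%x\n", parse_hex_ini("0x0F", 0x0, 0xFF, 0x0));
            /* out-of-range input falls back to the default of 0 */
            printf("mask = 0x%x\n", parse_hex_ini("0x1FF", 0x0, 0xFF, 0x0));
            return 0;
    }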

+ 4 - 1
core/hdd/src/wlan_hdd_main.c

@@ -7309,7 +7309,10 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
 			if (hdd_ctx->dynamic_rps)
 				hdd_set_rps_cpu_mask(hdd_ctx);
 		}
-		hdd_napi_apply_throughput_policy(hdd_ctx, tx_packets, rx_packets);
+		if (hdd_ctx->config->napi_cpu_affinity_mask)
+			hdd_napi_apply_throughput_policy(hdd_ctx,
+							 tx_packets,
+							 rx_packets);
 	}
 
 	qdf_dp_trace_throttle_live_mode(

+ 3 - 0
core/hdd/src/wlan_hdd_napi.c

@@ -98,6 +98,7 @@ int hdd_napi_create(void)
 	int     rc = 0;
 	struct hdd_context *hdd_ctx;
 	uint8_t feature_flags = 0;
+	struct qca_napi_data *napid = hdd_napi_get_all();
 
 	NAPI_DEBUG("-->");
 
@@ -127,6 +128,8 @@ int hdd_napi_create(void)
 			} else {
 				rc = hdd_napi_event(NAPI_EVT_INI_FILE,
 					(void *)hdd_ctx->napi_enable);
+				napid->user_cpu_affin_mask =
+					hdd_ctx->config->napi_cpu_affinity_mask;
 			}
 		}
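
For context, a hypothetical sketch (not the driver's actual HIF/NAPI code) of how a mask stored in napid->user_cpu_affin_mask could be turned into an IRQ affinity hint for a NAPI copy-engine interrupt, assuming bit N of the mask selects CPU N:

    #include <linux/bitops.h>
    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    /* Static so the cpumask stays valid after irq_set_affinity_hint()
     * stores a pointer to it. */
    static cpumask_t napi_user_cpus;

    static int apply_user_napi_affinity(unsigned int irq, unsigned long mask)
    {
            unsigned int cpu;

            if (!mask)      /* 0 (the default): leave the IRQ unaffined */
                    return 0;

            cpumask_clear(&napi_user_cpus);
            for_each_set_bit(cpu, &mask, BITS_PER_LONG)
                    if (cpu < nr_cpu_ids && cpu_online(cpu))
                            cpumask_set_cpu(cpu, &napi_user_cpus);

            /* advisory hint: the kernel applies it now but may still
             * move the IRQ later */
            return irq_set_affinity_hint(irq, &napi_user_cpus);
    }

In the driver itself, the mask stored on the NAPI data is consumed later by the HIF/NAPI CPU management code, which this diff does not show.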