Przeglądaj źródła

qcacmn: Back pressure in REO2SW1 ring resulting in REO panic

Update QCA_NAPI_DEF_SCALE_BIN_SHIFT for defconfig builds to
the same value as the perf build.
Move the WLAN ext irqs to gold cores for defconfig builds.
Disable cpu isolation before moving the IRQs to gold cores.
Add the ability to move IRQs to gold cores when the cpus
hotplug in.

Change-Id: I4cfecd02a1a2200dc99adee9a324b90c877c13fd
CRs-Fixed: 2638820
Nisha Menon 5 lat temu
rodzic
commit
046f3623de

+ 1 - 0
dp/wifi3.0/dp_main.c

@@ -2140,6 +2140,7 @@ static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
 	}
 
 	hif_configure_ext_group_interrupts(soc->hif_handle);
+	hif_config_irq_set_perf_affinity_hint(soc->hif_handle);
 
 	return QDF_STATUS_SUCCESS;
 }

+ 21 - 4
hif/inc/hif.h

@@ -39,6 +39,7 @@ extern "C" {
 #include <linux/ipa.h>
 #endif
 #include "cfg_ucfg_api.h"
+#include "qdf_dev.h"
 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
 
 typedef void __iomem *A_target_id_t;
@@ -125,13 +126,9 @@ struct CE_state;
 #endif
 
 #ifndef NAPI_YIELD_BUDGET_BASED
-#ifdef HIF_CONFIG_SLUB_DEBUG_ON
-#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 3
-#else
 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT   4
 #endif
-#endif /* SLUB_DEBUG_ON */
 #else  /* NAPI_YIELD_BUDGET_BASED */
 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
 #endif /* NAPI_YIELD_BUDGET_BASED */
@@ -1447,4 +1444,24 @@ void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
 {
 }
 #endif
+
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+/**
+ * hif_config_irq_set_perf_affinity_hint() - API to set affinity
+ * @hif_ctx: hif opaque handle
+ *
+ * This function is used to move the WLAN IRQs to perf cores in
+ * case of defconfig builds.
+ *
+ * Return: None
+ */
+void hif_config_irq_set_perf_affinity_hint(
+	struct hif_opaque_softc *hif_ctx);
+
+#else
+static inline void hif_config_irq_set_perf_affinity_hint(
+	struct hif_opaque_softc *hif_ctx)
+{
+}
+#endif
 #endif /* _HIF_H_ */

+ 10 - 1
hif/src/dispatcher/dummy.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -368,3 +368,12 @@ int hif_dummy_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
 	return 0;
 }
 
+/**
+ * hif_dummy_config_irq_affinity() - dummy call
+ * @scn: hif context
+ *
+ * Return: None
+ */
+void hif_dummy_config_irq_affinity(struct hif_softc *scn)
+{
+}

+ 2 - 1
hif/src/dispatcher/dummy.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -59,3 +59,4 @@ void hif_dummy_set_bundle_mode(struct hif_softc *hif_ctx,
 int hif_dummy_bus_reset_resume(struct hif_softc *hif_ctx);
 int hif_dummy_map_ce_to_irq(struct hif_softc *scn, int ce_id);
 int hif_dummy_addr_in_boundary(struct hif_softc *scn, uint32_t offset);
+void hif_dummy_config_irq_affinity(struct hif_softc *scn);

+ 7 - 0
hif/src/dispatcher/multibus.c

@@ -58,6 +58,8 @@ static void hif_initialize_default_ops(struct hif_softc *hif_sc)
 	bus_ops->hif_bus_late_resume = &hif_dummy_bus_resume;
 	bus_ops->hif_map_ce_to_irq = &hif_dummy_map_ce_to_irq;
 	bus_ops->hif_grp_irq_configure = &hif_dummy_grp_irq_configure;
+	bus_ops->hif_config_irq_affinity =
+		&hif_dummy_config_irq_affinity;
 }
 
 #define NUM_OPS (sizeof(struct hif_bus_ops) / sizeof(void *))
@@ -525,3 +527,8 @@ bool hif_needs_bmi(struct hif_opaque_softc *scn)
 }
 qdf_export_symbol(hif_needs_bmi);
 #endif /* WLAN_FEATURE_BMI */
+
+void hif_config_irq_affinity(struct hif_softc *hif_sc)
+{
+	hif_sc->bus_ops.hif_config_irq_affinity(hif_sc);
+}

+ 12 - 0
hif/src/dispatcher/multibus.h

@@ -80,6 +80,7 @@ struct hif_bus_ops {
 	int (*hif_map_ce_to_irq)(struct hif_softc *hif_sc, int ce_id);
 	int (*hif_addr_in_boundary)(struct hif_softc *scn, uint32_t offset);
 	bool (*hif_needs_bmi)(struct hif_softc *hif_sc);
+	void (*hif_config_irq_affinity)(struct hif_softc *hif_sc);
 };
 
 #ifdef HIF_SNOC
@@ -229,4 +230,15 @@ static inline int hif_usb_get_context_size(void)
 	return 0;
 }
 #endif /* HIF_USB */
+
+/**
+ * hif_config_irq_affinity() - Set IRQ affinity for WLAN IRQs
+ * @hif_sc: hif context
+ *
+ * Set IRQ affinity hint for WLAN IRQs in order to affine to
+ * gold cores.
+ *
+ * Return: None
+ */
+void hif_config_irq_affinity(struct hif_softc *hif_sc);
 #endif /* _MULTIBUS_H_ */

+ 2 - 0
hif/src/dispatcher/multibus_ahb.c

@@ -71,6 +71,8 @@ QDF_STATUS hif_initialize_ahb_ops(struct hif_bus_ops *bus_ops)
 	bus_ops->hif_needs_bmi = &hif_ahb_needs_bmi;
 	bus_ops->hif_display_stats = &hif_ahb_display_stats;
 	bus_ops->hif_clear_stats = &hif_ahb_clear_stats;
+	bus_ops->hif_config_irq_affinity =
+		&hif_dummy_config_irq_affinity;
 	return QDF_STATUS_SUCCESS;
 }
 

+ 2 - 0
hif/src/dispatcher/multibus_ipci.c

@@ -71,6 +71,8 @@ QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc)
 		&hif_ipci_clear_stats;
 	bus_ops->hif_addr_in_boundary = &hif_dummy_addr_in_boundary;
 	bus_ops->hif_needs_bmi = &hif_ipci_needs_bmi;
+	bus_ops->hif_config_irq_affinity =
+		&hif_dummy_config_irq_affinity;
 
 	return QDF_STATUS_SUCCESS;
 }

+ 3 - 1
hif/src/dispatcher/multibus_pci.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -85,6 +85,8 @@ QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc)
 	bus_ops->hif_map_ce_to_irq = &hif_pci_legacy_map_ce_to_irq;
 	bus_ops->hif_needs_bmi = &hif_pci_needs_bmi;
 
+	bus_ops->hif_config_irq_affinity =
+		&hif_pci_config_irq_affinity;
 	return QDF_STATUS_SUCCESS;
 }
 

+ 11 - 1
hif/src/dispatcher/pci_api.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -53,4 +53,14 @@ void hif_pci_clear_stats(struct hif_softc *hif_ctx);
 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id);
 bool hif_pci_needs_bmi(struct hif_softc *scn);
 const char *hif_pci_get_irq_name(int irq_no);
+
+/** hif_pci_config_irq_affinity() - Set the IRQ affinity
+ * @scn: hif context
+ *
+ * Set IRQ affinity hint for WLAN IRQs to gold cores only for
+ * defconfig builds.
+ *
+ * Return: None
+ */
+void hif_pci_config_irq_affinity(struct hif_softc *scn);
 #endif /* _PCI_API_H_ */

+ 14 - 2
hif/src/hif_exec.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -18,7 +18,6 @@
 
 #include <hif_exec.h>
 #include <ce_main.h>
-#include <hif_irq_affinity.h>
 #include "qdf_module.h"
 #include "qdf_net_if.h"
 /* mapping NAPI budget 0 to internal budget 0
@@ -580,6 +579,7 @@ static void hif_exec_napi_kill(struct hif_exec_context *ctx)
 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
 
+	hif_core_ctl_set_boost(false);
 	netif_napi_del(&(n_ctx->napi));
 }
 
@@ -694,6 +694,18 @@ int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
 
 qdf_export_symbol(hif_get_int_ctx_irq_num);
 
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+void hif_config_irq_set_perf_affinity_hint(
+	struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
+
+	hif_config_irq_affinity(scn);
+}
+
+qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
+#endif
+
 uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
 {
 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

+ 25 - 1
hif/src/hif_exec.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -20,6 +20,7 @@
 #define __HIF_EXEC_H__
 
 #include <hif.h>
+#include <hif_irq_affinity.h>
 #include <linux/cpumask.h>
 /*Number of buckets for latency*/
 #define HIF_SCHED_LATENCY_BUCKETS 8
@@ -107,6 +108,10 @@ struct hif_exec_context {
 	enum hif_exec_type type;
 	unsigned long long poll_start_time;
 	bool force_break;
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+	/* Stores the affinity hint mask for each WLAN IRQ */
+	qdf_cpu_mask new_cpu_mask[HIF_MAX_GRP_IRQ];
+#endif
 };
 
 /**
@@ -155,5 +160,24 @@ irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context);
 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *hif,
 					  uint8_t id);
 void hif_exec_kill(struct hif_opaque_softc *scn);
+
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+/**
+ * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
+ * @hif_ext_group: hif_ext_group to extract the irq info
+ *
+ * This function will set the IRQ affinity to the gold cores
+ * only for defconfig builds
+ *
+ * Return: none
+ */
+void hif_pci_irq_set_affinity_hint(
+	struct hif_exec_context *hif_ext_group);
+#else
+static inline void hif_pci_irq_set_affinity_hint(
+	struct hif_exec_context *hif_ext_group)
+{
+}
+#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
 #endif
 

+ 1 - 17
hif/src/hif_irq_affinity.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -32,9 +32,6 @@
 #include <linux/topology.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#ifdef CONFIG_SCHED_CORE_CTL
-#include <linux/sched/core_ctl.h>
-#endif
 #include <linux/pm.h>
 #include <hif_napi.h>
 #include <hif_irq_affinity.h>
@@ -462,19 +459,6 @@ static inline void hif_exec_bl_irq(struct qca_napi_data *napid, bool bl_flag)
 	}
 }
 
-#ifdef CONFIG_SCHED_CORE_CTL
-/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
-static inline int hif_napi_core_ctl_set_boost(bool boost)
-{
-	return core_ctl_set_boost(boost);
-}
-#else
-static inline int hif_napi_core_ctl_set_boost(bool boost)
-{
-	return 0;
-}
-#endif
-
 /**
  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
  * @napid: pointer to qca_napi_data structure

+ 28 - 1
hif/src/hif_irq_affinity.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -61,4 +61,31 @@ static inline int hif_exec_event(struct hif_opaque_softc     *hif,
 	return 0;
 }
 #endif
+
+/**
+ * hif_napi_core_ctl_set_boost() - This API is used to move
+ * tasks to CPUs with higher capacity
+ * @boost: If set move tasks to higher capacity CPUs
+ *
+ * This function moves tasks to higher capacity CPUs than those
+ * where the tasks would have normally ended up
+ *
+ * Return: 0 on success, error code on failure
+ */
+static inline int hif_napi_core_ctl_set_boost(bool boost)
+{
+	return qdf_core_ctl_set_boost(boost);
+}
+
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+static inline int hif_core_ctl_set_boost(bool boost)
+{
+	return hif_napi_core_ctl_set_boost(boost);
+}
+#else
+static inline int hif_core_ctl_set_boost(bool boost)
+{
+	return 0;
+}
+#endif
 #endif

+ 89 - 1
hif/src/hif_main.c

@@ -514,6 +514,93 @@ QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
 }
 #endif
 
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+/**
+ * __hif_cpu_hotplug_notify() - CPU hotplug event handler
+ * @cpu: CPU Id of the CPU generating the event
+ * @cpu_up: true if the CPU is online
+ *
+ * Return: None
+ */
+static void __hif_cpu_hotplug_notify(void *context,
+				     uint32_t cpu, bool cpu_up)
+{
+	struct hif_softc *scn = context;
+
+	if (!scn)
+		return;
+	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
+		return;
+
+	if (cpu_up) {
+		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
+		hif_debug("Setting affinity for online CPU: %d", cpu);
+	} else {
+		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
+	}
+}
+
+/**
+ * hif_cpu_hotplug_notify() - cpu core up/down notification handler
+ * @context: registered context, a struct hif_softc pointer
+ * @cpu: CPU generating the event
+ * @cpu_up: true if the CPU is online
+ *
+ * Return: None
+ */
+static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
+{
+	struct qdf_op_sync *op_sync;
+
+	if (qdf_op_protect(&op_sync))
+		return;
+
+	__hif_cpu_hotplug_notify(context, cpu, cpu_up);
+
+	qdf_op_unprotect(op_sync);
+}
+
+static void hif_cpu_online_cb(void *context, uint32_t cpu)
+{
+	hif_cpu_hotplug_notify(context, cpu, true);
+}
+
+static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
+{
+	hif_cpu_hotplug_notify(context, cpu, false);
+}
+
+static void hif_cpuhp_register(struct hif_softc *scn)
+{
+	if (!scn) {
+		hif_info_high("cannot register hotplug notifiers");
+		return;
+	}
+	qdf_cpuhp_register(&scn->cpuhp_event_handle,
+			   scn,
+			   hif_cpu_online_cb,
+			   hif_cpu_before_offline_cb);
+}
+
+static void hif_cpuhp_unregister(struct hif_softc *scn)
+{
+	if (!scn) {
+		hif_info_high("cannot unregister hotplug notifiers");
+		return;
+	}
+	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
+}
+
+#else
+static void hif_cpuhp_register(struct hif_softc *scn)
+{
+}
+
+static void hif_cpuhp_unregister(struct hif_softc *scn)
+{
+}
+#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
+
 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
 				  uint32_t mode,
 				  enum qdf_bus_type bus_type,
@@ -556,7 +643,7 @@ struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
 		qdf_mem_free(scn);
 		scn = NULL;
 	}
-
+	hif_cpuhp_register(scn);
 	return GET_HIF_OPAQUE_HDL(scn);
 }
 
@@ -606,6 +693,7 @@ void hif_close(struct hif_opaque_softc *hif_ctx)
 	}
 
 	hif_uninit_rri_on_ddr(scn);
+	hif_cpuhp_unregister(scn);
 
 	hif_bus_close(scn);
 

+ 4 - 0
hif/src/hif_main.h

@@ -245,6 +245,10 @@ struct hif_softc {
 #ifdef HIF_CE_LOG_INFO
 	qdf_notif_block hif_recovery_notifier;
 #endif
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+	/* The CPU hotplug event registration handle */
+	struct qdf_cpuhp_handler *cpuhp_event_handle;
+#endif
 };
 
 static inline

+ 1 - 13
hif/src/hif_napi.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -1679,18 +1679,6 @@ static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
 	}
 }
 
-#ifdef CONFIG_SCHED_CORE_CTL
-/* Enable this API only if kernel feature - CONFIG_SCHED_CORE_CTL is defined */
-static inline int hif_napi_core_ctl_set_boost(bool boost)
-{
-	return core_ctl_set_boost(boost);
-}
-#else
-static inline int hif_napi_core_ctl_set_boost(bool boost)
-{
-	return 0;
-}
-#endif
 /**
  * hif_napi_cpu_blacklist() - en(dis)ables blacklisting for NAPI RX interrupts.
  * @napid: pointer to qca_napi_data structure

+ 74 - 0
hif/src/pcie/if_pci.c

@@ -3563,6 +3563,80 @@ const char *hif_pci_get_irq_name(int irq_no)
 	return "pci-dummy";
 }
 
+#ifdef HIF_CPU_PERF_AFFINE_MASK
+/**
+ * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
+ * @hif_ext_group: hif_ext_group to extract the irq info
+ *
+ * This function will set the IRQ affinity to the gold cores
+ * only for defconfig builds
+ *
+ * Compiled in only when HIF_CPU_PERF_AFFINE_MASK is defined.
+ *
+ * Return: none
+ */
+void hif_pci_irq_set_affinity_hint(
+	struct hif_exec_context *hif_ext_group)
+{
+	int i, ret;
+	unsigned int cpus;
+	bool mask_set = false;
+
+	for (i = 0; i < hif_ext_group->numirq; i++)
+		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
+
+	for (i = 0; i < hif_ext_group->numirq; i++) {
+		qdf_for_each_online_cpu(cpus) {
+			if (qdf_topology_physical_package_id(cpus) ==
+				CPU_CLUSTER_TYPE_PERF) {
+				qdf_cpumask_set_cpu(cpus,
+						    &hif_ext_group->
+						    new_cpu_mask[i]);
+				mask_set = true;
+			}
+		}
+	}
+	for (i = 0; i < hif_ext_group->numirq; i++) {
+		if (mask_set) {
+			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
+						  IRQ_NO_BALANCING, 0);
+			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
+						       (struct qdf_cpu_mask *)
+						       &hif_ext_group->
+						       new_cpu_mask[i]);
+			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
+						  0, IRQ_NO_BALANCING);
+			if (ret)
+				qdf_err("Set affinity %*pbl fails for IRQ %d ",
+					qdf_cpumask_pr_args(&hif_ext_group->
+							    new_cpu_mask[i]),
+					hif_ext_group->os_irq[i]);
+			else
+				qdf_debug("Set affinity %*pbl for IRQ: %d",
+					  qdf_cpumask_pr_args(&hif_ext_group->
+							      new_cpu_mask[i]),
+					  hif_ext_group->os_irq[i]);
+		} else {
+			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
+				hif_ext_group->os_irq[i]);
+		}
+	}
+}
+#endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
+
+void hif_pci_config_irq_affinity(struct hif_softc *scn)
+{
+	int i;
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct hif_exec_context *hif_ext_group;
+
+	hif_core_ctl_set_boost(true);
+	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
+		hif_ext_group = hif_state->hif_ext_group[i];
+		hif_pci_irq_set_affinity_hint(hif_ext_group);
+	}
+}
+
 int hif_pci_configure_grp_irq(struct hif_softc *scn,
 			      struct hif_exec_context *hif_ext_group)
 {

+ 26 - 4
qdf/inc/qdf_dev.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -28,9 +28,9 @@
 #include <qdf_types.h>
 #include "i_qdf_dev.h"
 
-struct qdf_cpu_mask;
-struct qdf_devm;
-struct qdf_dev;
+#define qdf_cpumask_pr_args(maskp) __qdf_cpumask_pr_args(maskp)
+#define qdf_for_each_possible_cpu(cpu) __qdf_for_each_possible_cpu(cpu)
+#define qdf_for_each_online_cpu(cpu) __qdf_for_each_online_cpu(cpu)
 
 #ifdef ENHANCED_OS_ABSTRACTION
 /**
@@ -112,4 +112,26 @@ qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask)
 	return __qdf_dev_set_irq_affinity(irnum, cpmask);
 }
 #endif
+
+static inline int qdf_topology_physical_package_id(unsigned int cpu)
+{
+	return __qdf_topology_physical_package_id(cpu);
+}
+
+static inline int qdf_cpumask_subset(qdf_cpu_mask *srcp1,
+				     const qdf_cpu_mask *srcp2)
+{
+	return __qdf_cpumask_subset(srcp1, srcp2);
+}
+
+static inline int qdf_cpumask_intersects(qdf_cpu_mask *srcp1,
+					 const qdf_cpu_mask *srcp2)
+{
+	return __qdf_cpumask_intersects(srcp1, srcp2);
+}
+
+static inline int qdf_core_ctl_set_boost(bool boost)
+{
+	return __qdf_core_ctl_set_boost(boost);
+}
 #endif /* __QDF_DEV_H */

+ 3 - 0
qdf/inc/qdf_types.h

@@ -37,6 +37,9 @@
 /* Preprocessor definitions and constants */
 #define QDF_MAX_SGLIST 4
 
+#define CPU_CLUSTER_TYPE_LITTLE 0
+#define CPU_CLUSTER_TYPE_PERF 1
+
 /**
  * struct qdf_sglist - scatter-gather list
  * @nsegs: total number of segments

+ 76 - 2
qdf/linux/src/i_qdf_dev.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -28,11 +28,18 @@
 #include <qdf_types.h>
 #include "qdf_util.h"
 #include <linux/irq.h>
+#ifdef CONFIG_SCHED_CORE_CTL
+#include <linux/sched/core_ctl.h>
+#endif
 
 struct qdf_cpu_mask;
 struct qdf_devm;
 struct qdf_dev;
 
+#define __qdf_cpumask_pr_args(maskp) cpumask_pr_args(maskp)
+#define __qdf_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#define __qdf_for_each_online_cpu(cpu) for_each_online_cpu(cpu)
+
 /**
  * __qdf_dev_alloc_mem() - allocate memory
  * @qdfdev: Device handle
@@ -107,7 +114,74 @@ __qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask)
 	int ret;
 
 	ret = irq_set_affinity_hint(irnum, (struct cpumask *)cpmask);
-
 	return qdf_status_from_os_return(ret);
 }
+
+/**
+ * __qdf_topology_physical_package_id() - API to retrieve the
+ * cluster info
+ * @cpu: cpu core
+ *
+ * This function returns the cluster information for the given cpu
+ * core
+ *
+ * Return: 1 for perf and 0 for non-perf cluster
+ */
+static inline int __qdf_topology_physical_package_id(unsigned int cpu)
+{
+	return topology_physical_package_id(cpu);
+}
+
+/**
+ * __qdf_cpumask_subset() - API to check for subset in cpumasks
+ * @srcp1: first cpu mask
+ * @srcp2: second cpu mask
+ *
+ * This checks for (*srcp1 & ~*srcp2) == 0
+ *
+ * Return: 1 if srcp1 is subset of srcp2 else 0
+ */
+static inline int __qdf_cpumask_subset(qdf_cpu_mask *srcp1,
+				       const qdf_cpu_mask *srcp2)
+{
+	return cpumask_subset(srcp1, srcp2);
+}
+
+/**
+ * __qdf_cpumask_intersects() - API to check if cpumasks
+ * intersect
+ * @srcp1: first cpu mask
+ * @srcp2: second cpu mask
+ *
+ * This checks for (*srcp1 & *srcp2) != 0
+ *
+ * Return: 1 if srcp1 and srcp2 intersect else 0
+ */
+static inline int __qdf_cpumask_intersects(qdf_cpu_mask *srcp1,
+					   const qdf_cpu_mask *srcp2)
+{
+	return cpumask_intersects(srcp1, srcp2);
+}
+
+#ifdef CONFIG_SCHED_CORE_CTL
+/**
+ * __qdf_core_ctl_set_boost() - This API is used to move tasks
+ * to CPUs with higher capacity
+ * @boost: true to move tasks to higher capacity CPUs
+ * This function moves tasks to higher capacity CPUs than those
+ * where the tasks would have normally ended up. This is
+ * applicable only to defconfig builds.
+ *
+ * Return: 0 on success
+ */
+static inline int __qdf_core_ctl_set_boost(bool boost)
+{
+	return core_ctl_set_boost(boost);
+}
+#else
+static inline int __qdf_core_ctl_set_boost(bool boost)
+{
+	return 0;
+}
+#endif
 #endif /* __I_QDF_DEV_H */