@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -28,11 +28,18 @@
 #include <qdf_types.h>
 #include "qdf_util.h"
 #include <linux/irq.h>
+#ifdef CONFIG_SCHED_CORE_CTL
+#include <linux/sched/core_ctl.h>
+#endif
 
 struct qdf_cpu_mask;
 struct qdf_devm;
 struct qdf_dev;
 
+#define __qdf_cpumask_pr_args(maskp) cpumask_pr_args(maskp)
+#define __qdf_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
+#define __qdf_for_each_online_cpu(cpu) for_each_online_cpu(cpu)
+
 /**
  * __qdf_dev_alloc_mem() - allocate memory
  * @qdfdev: Device handle
@@ -107,7 +114,74 @@ __qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask)
 	int ret;
 
 	ret = irq_set_affinity_hint(irnum, (struct cpumask *)cpmask);
-
 	return qdf_status_from_os_return(ret);
 }
+
+/**
+ * __qdf_topology_physical_package_id() - API to retrieve the
+ * cluster info
+ * @cpu: cpu core
+ *
+ * This function returns the cluster information for the given
+ * cpu core
+ *
+ * Return: 1 for perf and 0 for non-perf cluster
+ */
+static inline int __qdf_topology_physical_package_id(unsigned int cpu)
+{
+	return topology_physical_package_id(cpu);
+}
+
+/**
+ * __qdf_cpumask_subset() - API to check for subset in cpumasks
+ * @srcp1: first cpu mask
+ * @srcp2: second cpu mask
+ *
+ * This checks whether (*srcp1 & ~*srcp2) is empty
+ *
+ * Return: 1 if srcp1 is a subset of srcp2, else 0
+ */
+static inline int __qdf_cpumask_subset(qdf_cpu_mask *srcp1,
+				       const qdf_cpu_mask *srcp2)
+{
+	return cpumask_subset(srcp1, srcp2);
+}
+
+/**
+ * __qdf_cpumask_intersects() - API to check if cpumasks
+ * intersect
+ * @srcp1: first cpu mask
+ * @srcp2: second cpu mask
+ *
+ * This checks for (*srcp1 & *srcp2) != 0
+ *
+ * Return: 1 if srcp1 and srcp2 intersect, else 0
+ */
+static inline int __qdf_cpumask_intersects(qdf_cpu_mask *srcp1,
+					   const qdf_cpu_mask *srcp2)
+{
+	return cpumask_intersects(srcp1, srcp2);
+}
+
+#ifdef CONFIG_SCHED_CORE_CTL
+/**
+ * __qdf_core_ctl_set_boost() - This API is used to move tasks
+ * to CPUs with higher capacity
+ *
+ * This function moves tasks to higher capacity CPUs than those
+ * where the tasks would have normally ended up. This is
+ * applicable only to defconfig builds.
+ *
+ * Return: 0 on success
+ */
+static inline int __qdf_core_ctl_set_boost(bool boost)
+{
+	return core_ctl_set_boost(boost);
+}
+#else
+static inline int __qdf_core_ctl_set_boost(bool boost)
+{
+	return 0;
+}
+#endif
 #endif /* __I_QDF_DEV_H */
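
Below is a minimal, illustrative sketch of how a Linux-side caller might exercise the new cpumask and topology wrappers. The file name and helper are hypothetical, the choice of cluster id 1 follows the "perf cluster" convention stated in the kernel-doc above, and the sketch assumes qdf_cpu_mask resolves to the kernel's struct cpumask in this implementation (as the cast in __qdf_dev_set_irq_affinity() suggests); real callers would normally go through the public qdf_* layer rather than these __qdf_* internals.

/* example_cpu_mask.c -- hypothetical illustration only, not part of the patch */
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/printk.h>
#include "i_qdf_dev.h"

static void example_dump_cluster1_cpus(void)
{
	qdf_cpu_mask cluster1_cpus;
	unsigned int cpu;

	cpumask_clear(&cluster1_cpus);

	/* Collect the possible CPUs reported as cluster 1 (perf cluster). */
	__qdf_for_each_possible_cpu(cpu) {
		if (__qdf_topology_physical_package_id(cpu) == 1)
			cpumask_set_cpu(cpu, &cluster1_cpus);
	}

	/* __qdf_cpumask_pr_args() pairs with the %*pbl printk format. */
	pr_info("cluster 1 cpus: %*pbl\n",
		__qdf_cpumask_pr_args(&cluster1_cpus));

	/* Are all cluster 1 CPUs currently online? */
	if (__qdf_cpumask_subset(&cluster1_cpus, cpu_online_mask))
		pr_info("all cluster 1 cpus are online\n");

	/* Is at least one cluster 1 CPU online? */
	if (__qdf_cpumask_intersects(&cluster1_cpus, cpu_online_mask))
		pr_info("at least one cluster 1 cpu is online\n");
}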
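
A similarly hedged sketch for __qdf_core_ctl_set_boost(): the helper names and the high-throughput trigger are hypothetical, and the status conversion simply mirrors the qdf_status_from_os_return() pattern already used in __qdf_dev_set_irq_affinity(). When CONFIG_SCHED_CORE_CTL is disabled the wrapper compiles to a stub that returns 0, so callers need no extra guards.

/* example_core_ctl.c -- hypothetical illustration only, not part of the patch */
#include <qdf_status.h>
#include "i_qdf_dev.h"

/* Ask core_ctl to bias task placement toward higher-capacity CPUs. */
static QDF_STATUS example_enter_high_tput_mode(void)
{
	int ret;

	ret = __qdf_core_ctl_set_boost(true);

	return qdf_status_from_os_return(ret);
}

/* Drop the boost once the high-throughput window ends. */
static QDF_STATUS example_exit_high_tput_mode(void)
{
	return qdf_status_from_os_return(__qdf_core_ctl_set_boost(false));
}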