
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Various NUMA scheduling updates: harmonize the load-balancer and
     NUMA placement logic to not work against each other. The intended
     result is better locality, better utilization and fewer migrations.

   - Introduce Thermal Pressure tracking and optimizations, to improve
     task placement on thermally overloaded systems.

   - Implement frequency invariant scheduler accounting on (some) x86
     CPUs. This is done by observing and sampling the 'recent' CPU
     frequency average at ~tick boundaries. The CPU provides this data
     via the APERF/MPERF MSRs. This hopefully makes our capacity
     estimates more precise and keeps tasks on the same CPU better even
     if it might seem overloaded at a lower momentary frequency. (As
     usual, turbo mode is a complication that we resolve by observing
     the maximum frequency and renormalizing to it.)

   - Add asymmetric CPU capacity wakeup scan to improve capacity
     utilization on asymmetric topologies. (big.LITTLE systems)

   - PSI fixes and optimizations.

   - RT scheduling capacity awareness fixes & improvements.

   - Optimize the CONFIG_RT_GROUP_SCHED constraints code.

   - Misc fixes, cleanups and optimizations - see the changelog for
     details"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (62 commits)
  threads: Update PID limit comment according to futex UAPI change
  sched/fair: Fix condition of avg_load calculation
  sched/rt: cpupri_find: Trigger a full search as fallback
  kthread: Do not preempt current task if it is going to call schedule()
  sched/fair: Improve spreading of utilization
  sched: Avoid scale real weight down to zero
  psi: Move PF_MEMSTALL out of task->flags
  MAINTAINERS: Add maintenance information for psi
  psi: Optimize switching tasks inside shared cgroups
  psi: Fix cpu.pressure for cpu.max and competing cgroups
  sched/core: Distribute tasks within affinity masks
  sched/fair: Fix enqueue_task_fair warning
  thermal/cpu-cooling, sched/core: Move the arch_set_thermal_pressure() API to generic scheduler code
  sched/rt: Remove unnecessary push for unfit tasks
  sched/rt: Allow pulling unfitting task
  sched/rt: Optimize cpupri_find() on non-heterogenous systems
  sched/rt: Re-instate old behavior in select_task_rq_rt()
  sched/rt: cpupri_find: Implement fallback mechanism for !fit case
  sched/fair: Fix reordering of enqueue/dequeue_task_fair()
  sched/fair: Fix runnable_avg for throttled cfs
  ...
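
As a rough illustration of the frequency-invariance arithmetic described above (a sketch under assumed names; the kernel's actual implementation lives in the x86 arch code and differs in detail), the scale factor is the per-tick APERF/MPERF delta ratio, renormalized against the turbo-to-base frequency ratio so that sustained turbo reads as full capacity:

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/*
 * Illustrative helper: acnt/mcnt are APERF/MPERF deltas sampled about
 * one tick apart; max_freq_ratio is assumed to be (turbo_freq /
 * base_freq) in SCHED_CAPACITY_SHIFT fixed point.
 */
static uint64_t freq_scale_from_aperfmperf(uint64_t acnt, uint64_t mcnt,
					   uint64_t max_freq_ratio)
{
	uint64_t scale;

	if (!mcnt || !max_freq_ratio)
		return SCHED_CAPACITY_SCALE;	/* no data: assume full speed */

	/* acnt/mcnt is the average current-to-reference frequency ratio */
	scale = (acnt << (2 * SCHED_CAPACITY_SHIFT)) / (mcnt * max_freq_ratio);

	/* a turbo excursion must not report more than 100% capacity */
	return scale > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE : scale;
}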

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/arch_topology.h - arch specific cpu topology information
 */
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_

#include <linux/types.h>
#include <linux/percpu.h>

void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);

struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);

/* Per-CPU compute capacity, SCHED_CAPACITY_SCALE on the most capable CPU. */
DECLARE_PER_CPU(unsigned long, cpu_scale);

static inline unsigned long topology_get_cpu_scale(int cpu)
{
	return per_cpu(cpu_scale, cpu);
}
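
Architectures that use this generic driver typically wire the scheduler's CPU-capacity hook straight to the getter above; on arm64, for instance, the mapping is roughly the following (quoted from memory, shown for orientation rather than verbatim):

/* arch/arm64/include/asm/topology.h */
#define arch_scale_cpu_capacity	topology_get_cpu_scale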

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);

/* Per-CPU frequency scale: recent average frequency over maximum frequency. */
DECLARE_PER_CPU(unsigned long, freq_scale);

static inline unsigned long topology_get_freq_scale(int cpu)
{
	return per_cpu(freq_scale, cpu);
}
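
The driver side refreshes freq_scale whenever cpufreq reports a frequency change; the heart of that update is a simple fixed-point ratio. A minimal sketch with illustrative names (SCHED_CAPACITY_SHIFT is the scheduler's usual 10-bit fixed-point shift):

/* Sketch: publish cur_freq/max_freq for every CPU in 'cpus'. */
static void set_freq_scale(const struct cpumask *cpus,
			   unsigned long cur_freq, unsigned long max_freq)
{
	unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
	int cpu;

	for_each_cpu(cpu, cpus)
		per_cpu(freq_scale, cpu) = scale;
}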

/*
 * Per-CPU capacity lost to thermal capping, in the same units as
 * cpu_scale; 0 means the CPU is not thermally constrained.
 */
DECLARE_PER_CPU(unsigned long, thermal_pressure);

static inline unsigned long topology_get_thermal_pressure(int cpu)
{
	return per_cpu(thermal_pressure, cpu);
}

void arch_set_thermal_pressure(struct cpumask *cpus,
			       unsigned long th_pressure);
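
The expected caller of arch_set_thermal_pressure() is a thermal governor such as the cpufreq cooling device: when it caps a CPU's frequency, it reports how much compute capacity the cap takes away. A hedged sketch of that calculation, with illustrative function and variable names:

/* Illustrative: report capacity lost when 'cpus' are capped to capped_freq. */
static void report_thermal_cap(struct cpumask *cpus,
			       unsigned long capped_freq, unsigned long max_freq)
{
	unsigned long max_cap = topology_get_cpu_scale(cpumask_first(cpus));
	unsigned long capped_cap = max_cap * capped_freq / max_freq;

	/* thermal pressure is the capacity no longer reachable under the cap */
	arch_set_thermal_pressure(cpus, max_cap - capped_cap);
}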

struct cpu_topology {
	int thread_id;
	int core_id;
	int package_id;
	int llc_id;
	cpumask_t thread_sibling;
	cpumask_t core_sibling;
	cpumask_t llc_sibling;
};

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
extern struct cpu_topology cpu_topology[NR_CPUS];

#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
#define topology_core_id(cpu)			(cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu)		(&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu)		(&cpu_topology[cpu].thread_sibling)
#define topology_llc_cpumask(cpu)		(&cpu_topology[cpu].llc_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
#endif

#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
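
As a quick usage sketch (illustrative helpers, not from the kernel tree), the accessors above let generic code reason about CPU placement without knowing how the architecture discovered its topology:

/* Illustrative: are two CPUs in the same physical package? */
static bool cpus_share_package(int a, int b)
{
	return topology_physical_package_id(a) == topology_physical_package_id(b);
}

/* Illustrative: number of SMT siblings 'cpu' has, itself included. */
static unsigned int smt_width(int cpu)
{
	return cpumask_weight(topology_sibling_cpumask(cpu));
}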