Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes are:

   - Migrate CPU-intense 'misfit' tasks on asymmetric capacity systems,
     to better utilize (much) faster 'big core' CPUs. (Morten Rasmussen,
     Valentin Schneider)

   - Topology handling improvements, in particular when CPU capacity
     changes and related load-balancing fixes/improvements (Morten
     Rasmussen)

   - ... plus misc other improvements, fixes and updates"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (28 commits)
  sched/completions/Documentation: Add recommendation for dynamic and ONSTACK completions
  sched/completions/Documentation: Clean up the document some more
  sched/completions/Documentation: Fix a couple of punctuation nits
  cpu/SMT: State SMT is disabled even with nosmt and without "=force"
  sched/core: Fix comment regarding nr_iowait_cpu() and get_iowait_load()
  sched/fair: Remove setting task's se->runnable_weight during PELT update
  sched/fair: Disable LB_BIAS by default
  sched/pelt: Fix warning and clean up IRQ PELT config
  sched/topology: Make local variables static
  sched/debug: Use symbolic names for task state constants
  sched/numa: Remove unused numa_stats::nr_running field
  sched/numa: Remove unused code from update_numa_stats()
  sched/debug: Explicitly cast sched_feat() to bool
  sched/core: Disable SD_PREFER_SIBLING on asymmetric CPU capacity domains
  sched/fair: Don't move tasks to lower capacity CPUs unless necessary
  sched/fair: Set rq->rd->overload when misfit
  sched/fair: Wrap rq->rd->overload accesses with READ/WRITE_ONCE()
  sched/core: Change root_domain->overload type to int
  sched/fair: Change 'prefer_sibling' type to bool
  sched/fair: Kick nohz balance if rq->misfit_task_load
  ...
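For context on the 'misfit' logic referenced above: the heart of that series is a capacity-fit test in kernel/sched/fair.c. The sketch below is a paraphrase from memory of the ~v4.20-era code, not a verbatim excerpt from this merge; the helpers it leans on (task_util_est(), capacity_of(), cpu_of(), task_h_load()) are existing fair.c internals of that period, and capacity_margin was the ~25% headroom constant (1280/1024).

/*
 * Paraphrased sketch of the misfit test (kernel/sched/fair.c, ~v4.20).
 * A task "fits" a CPU when the CPU's capacity exceeds the task's
 * estimated utilization by capacity_margin (1280/1024, i.e. ~25%).
 */
static inline int task_fits_capacity(struct task_struct *p, long capacity)
{
	return capacity * 1024 > task_util_est(p) * capacity_margin;
}

/*
 * From the scheduler tick, each runqueue records whether its current
 * task has outgrown the CPU; a non-zero misfit_task_load tells the
 * load balancer to pull the task to a higher-capacity ("big") CPU.
 */
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
	if (!p || task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
		rq->misfit_task_load = 0;
		return;
	}

	rq->misfit_task_load = task_h_load(p);
}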
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/sched/topology.h>
+#include <linux/cpuset.h>
 
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
@@ -47,6 +48,9 @@ static ssize_t cpu_capacity_show(struct device *dev,
 	return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
 }
 
+static void update_topology_flags_workfn(struct work_struct *work);
+static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
+
 static ssize_t cpu_capacity_store(struct device *dev,
 				  struct device_attribute *attr,
 				  const char *buf,
@@ -72,6 +76,8 @@ static ssize_t cpu_capacity_store(struct device *dev,
 	topology_set_cpu_scale(i, new_capacity);
 	mutex_unlock(&cpu_scale_mutex);
 
+	schedule_work(&update_topology_flags_work);
+
 	return count;
 }
 
@@ -96,6 +102,25 @@ static int register_cpu_capacity_sysctl(void)
 }
 subsys_initcall(register_cpu_capacity_sysctl);
 
+static int update_topology;
+
+int topology_update_cpu_topology(void)
+{
+	return update_topology;
+}
+
+/*
+ * Updating the sched_domains can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void update_topology_flags_workfn(struct work_struct *work)
+{
+	update_topology = 1;
+	rebuild_sched_domains();
+	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
+	update_topology = 0;
+}
+
 static u32 capacity_scale;
 static u32 *raw_capacity;
 
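The update_topology flag above is what the scheduler core reads back while rebuild_sched_domains() runs, so that the rebuild re-evaluates topology flags such as SD_ASYM_CPUCAPACITY from the new capacities. As an illustrative sketch of the consumer side (not a verbatim excerpt from this series; the exact arch plumbing may differ), an architecture using arch_topology.c could route the scheduler's weak hook to the new helper:

#include <linux/arch_topology.h>

/*
 * Sketch: kernel/sched/topology.c provides a __weak
 * arch_update_cpu_topology() that returns 0 ("nothing changed").
 * Defining a strong version that forwards to
 * topology_update_cpu_topology() makes the hook return 1 exactly for
 * the rebuild triggered by update_topology_flags_workfn(), forcing
 * the sched_domain flags to be re-detected.
 */
int arch_update_cpu_topology(void)
{
	return topology_update_cpu_topology();
}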
@@ -201,6 +226,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 
 	if (cpumask_empty(cpus_to_visit)) {
 		topology_normalize_cpu_scale();
+		schedule_work(&update_topology_flags_work);
 		free_raw_capacity();
 		pr_debug("cpu_capacity: parsing done\n");
 		schedule_work(&parsing_done_work);
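Taken together, these hunks (all against drivers/base/arch_topology.c) give the file two triggers for a sched_domain rebuild: a userspace write to the cpu_capacity sysfs attribute, and the cpufreq notifier path once every CPU's capacity has been parsed and normalized. Both defer to update_topology_flags_workfn() via schedule_work() because, as the added comment notes, rebuild_sched_domains() takes locks that cannot be acquired from those contexts; the work function brackets the rebuild with update_topology = 1/0 so the scheduler can tell that this particular rebuild was caused by a capacity change.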