Merge tag 'v4.20-rc5' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2880,6 +2880,18 @@ unsigned long long nr_context_switches(void)
 	return sum;
 }
 
+/*
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data. Preferring shallow idle state selection
+ * for a CPU that has IO-wait which might not even end up running the task when
+ * it does become runnable.
+ */
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
 /*
  * IO-wait accounting, and how its mostly bollocks (on SMP).
  *
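
The first hunk moves nr_iowait_cpu() ahead of the IO-wait comment block and documents why its consumers are on shaky ground: the task charged to a CPU's iowait count may be woken up and run somewhere else entirely. A standalone sketch of the consumer pattern the new comment criticizes, an idle governor biasing toward shallow states when iowaiters are present; all names here are hypothetical stand-ins, not the cpuidle menu governor's actual code:

/*
 * Toy model: per-CPU iowait counters and a governor that reads them.
 * atomic_int stands in for the kernel's atomic_t.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_int nr_iowait[NR_CPUS];		/* models rq->nr_iowait */

static int model_nr_iowait_cpu(int cpu)
{
	return atomic_load(&nr_iowait[cpu]);
}

/* Prefer a shallower (lower-index) idle state while tasks wait on IO. */
static int pick_idle_state(int cpu, int deepest)
{
	if (model_nr_iowait_cpu(cpu) > 0)
		return deepest / 2;	/* bet on a quick IO completion */
	return deepest;
}

int main(void)
{
	atomic_fetch_add(&nr_iowait[1], 1);	/* one task sleeps on IO on CPU 1 */
	printf("cpu0 picks state %d\n", pick_idle_state(0, 4));
	printf("cpu1 picks state %d\n", pick_idle_state(1, 4));
	return 0;
}

The bet can be wrong for exactly the reason the comment gives: the sleeper may never run on CPU 1 again, so the shallow state bought nothing.
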
@@ -2915,31 +2927,11 @@ unsigned long nr_iowait(void)
 	unsigned long i, sum = 0;
 
 	for_each_possible_cpu(i)
-		sum += atomic_read(&cpu_rq(i)->nr_iowait);
+		sum += nr_iowait_cpu(i);
 
 	return sum;
 }
 
-/*
- * Consumers of these two interfaces, like for example the cpuidle menu
- * governor, are using nonsensical data. Preferring shallow idle state selection
- * for a CPU that has IO-wait which might not even end up running the task when
- * it does become runnable.
- */
-
-unsigned long nr_iowait_cpu(int cpu)
-{
-	struct rq *this = cpu_rq(cpu);
-	return atomic_read(&this->nr_iowait);
-}
-
-void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-{
-	struct rq *rq = this_rq();
-	*nr_waiters = atomic_read(&rq->nr_iowait);
-	*load = rq->load.weight;
-}
-
 #ifdef CONFIG_SMP
 
 /*
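
The second hunk is the matching cleanup: nr_iowait() now sums through the nr_iowait_cpu() helper instead of open-coding the atomic read, and the now-unused get_iowait_load() is deleted along with the duplicated comment and the old nr_iowait_cpu() body. The retained "mostly bollocks (on SMP)" comment is about attribution: the counter is charged to the rq the task slept on, not the CPU where it wakes. A self-contained model of that skew (hypothetical names, C11 atomics in place of atomic_t):

/*
 * Toy model: iowait is charged to the CPU the task slept on, so the
 * per-CPU readout can point at the wrong place on SMP.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_int nr_iowait[NR_CPUS];

static void io_schedule_on(int cpu)  { atomic_fetch_add(&nr_iowait[cpu], 1); }
static void io_wakeup_from(int cpu)  { atomic_fetch_sub(&nr_iowait[cpu], 1); }

static long model_nr_iowait(void)
{
	long sum = 0;

	for (int i = 0; i < NR_CPUS; i++)
		sum += atomic_load(&nr_iowait[i]);	/* same shape as nr_iowait_cpu(i) */
	return sum;
}

int main(void)
{
	io_schedule_on(0);	/* task blocks on IO while on CPU 0 */
	/* ...it may later wake and run on CPU 3, yet CPU 0 stays charged */
	printf("cpu0=%d cpu3=%d total=%ld\n",
	       atomic_load(&nr_iowait[0]), atomic_load(&nr_iowait[3]),
	       model_nr_iowait());
	io_wakeup_from(0);
	return 0;
}

The global sum stays coherent because every decrement hits the same counter its increment did; only the per-CPU split misleads.
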
@@ -5746,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
 
 #ifdef CONFIG_SCHED_SMT
 	/*
-	 * The sched_smt_present static key needs to be evaluated on every
-	 * hotplug event because at boot time SMT might be disabled when
-	 * the number of booted CPUs is limited.
-	 *
-	 * If then later a sibling gets hotplugged, then the key would stay
-	 * off and SMT scheduling would never be functional.
+	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
-		static_branch_enable_cpuslocked(&sched_smt_present);
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
 	set_cpu_active(cpu, true);
 
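
Before this hunk, sched_smt_present was switched on the first time any CPU came up with a sibling and then stayed on forever. Testing for exactly two online siblings and moving to the counting static-key API turns the key into a refcount of cores that currently have SMT, so it can drop back off (see the matching decrement in the next hunk). A userspace model of that refcounting; the names and the plain int are stand-ins for static_branch_inc/dec_cpuslocked() on the real key:

/*
 * Toy model: the second online sibling of a core increments the count,
 * dropping back to one sibling decrements it. "SMT present" is true
 * while any core still has two threads online.
 */
#include <stdio.h>

static int smt_present_count;	/* models the static key's refcount */

static int smt_present(void) { return smt_present_count > 0; }

static void sibling_up(int *siblings_online)
{
	if (++*siblings_online == 2)	/* second sibling: core is now SMT */
		smt_present_count++;
}

static void sibling_down(int *siblings_online)
{
	if (--*siblings_online == 1)	/* back to one: core no longer SMT */
		smt_present_count--;
}

int main(void)
{
	int core0 = 0, core1 = 0;

	sibling_up(&core0);
	sibling_up(&core0);		/* core0 gains its second thread */
	sibling_up(&core1);
	printf("SMT present: %d\n", smt_present());	/* 1 */
	sibling_down(&core0);
	printf("SMT present: %d\n", smt_present());	/* 0: no 2-sibling core left */
	return 0;
}
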
@@ -5798,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going down, decrement the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
 	if (!sched_smp_initialized)
 		return 0;
 
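
The decrement sits symmetrically in sched_cpu_deactivate(): the outgoing CPU is still in cpu_smt_mask() at this point, so a weight of exactly 2 means its removal leaves the core with a single thread, balancing the increment from the activate path. Consumers then see the key read false once no SMT core remains; the kernel wraps that test in a helper along the lines of sched_smt_active(). A hedged consumer-side sketch, again with a plain counter modeling the static-key branch:

/*
 * Toy model of a key consumer: gate SMT-aware work on whether any core
 * currently has both siblings online. In the kernel this is a
 * static-key branch, not a plain int.
 */
#include <stdbool.h>
#include <stdio.h>

static int smt_present_count;	/* bumped by the hotplug paths, as modeled above */

static bool model_sched_smt_active(void)
{
	return smt_present_count > 0;
}

static void balance(void)
{
	if (model_sched_smt_active())
		printf("SMT-aware balancing (e.g. prefer fully idle cores)\n");
	else
		printf("plain per-CPU balancing, no sibling concerns\n");
}

int main(void)
{
	balance();		/* no SMT core online yet */
	smt_present_count++;	/* a core's second sibling comes up */
	balance();
	return 0;
}
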