diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 78efbb3522b8..71a493bdeacd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8545,23 +8545,16 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 *   * Thermal pressure will impact all cpus in this perf domain
 	 *     equally.
 	 */
-	if (sched_energy_enabled()) {
+	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd;
+		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
 
-		rcu_read_lock();
-
-		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
 			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
-			/* We can't be inverted against our own pd */
-			if (cpumask_test_cpu(cpu_of(rq), pd_span))
-				continue;
-
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8586,8 +8579,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
-
-		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);
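
For readers less familiar with the locking shape this hunk ends up with: dropping the local rcu_read_lock()/rcu_read_unlock() pair and initialising pd straight from rcu_dereference(rq->rd->pd) is only safe if the path into update_cpu_capacity() already runs inside an RCU read-side critical section, which the patched code presumably relies on its callers to provide. Below is a minimal userspace sketch of that split using liburcu as a stand-in for the kernel primitives; every type, function and number in it is invented for illustration and is not taken from the patch.

/*
 * Sketch: walk an RCU-protected singly linked list in a helper that
 * does NOT take the read-side lock itself; the caller supplies the
 * critical section. Build with something like: gcc demo.c -lurcu
 * (or the flavour-specific liburcu library your distro ships).
 */
#include <stdio.h>
#include <urcu.h>

struct perf_domain_demo {			/* made-up analogue of struct perf_domain */
	unsigned long cap_orig;
	struct perf_domain_demo *next;
};

static struct perf_domain_demo *pd_list;	/* RCU-protected list head */

/*
 * Mirrors the patched body: rcu_dereference() with no local lock,
 * valid only because the caller is already in a read-side section.
 */
static unsigned long min_cap_orig_locked(void)
{
	struct perf_domain_demo *pd = rcu_dereference(pd_list);
	unsigned long min_cap = -1UL;

	for (; pd; pd = pd->next)
		if (pd->cap_orig < min_cap)
			min_cap = pd->cap_orig;

	return min_cap;
}

/* The caller owns the read-side critical section. */
static unsigned long min_cap_orig(void)
{
	unsigned long ret;

	rcu_read_lock();
	ret = min_cap_orig_locked();
	rcu_read_unlock();

	return ret;
}

int main(void)
{
	static struct perf_domain_demo little = { .cap_orig = 446 };
	static struct perf_domain_demo big = { .cap_orig = 1024, .next = &little };

	rcu_register_thread();			/* required before reading with this flavour */
	rcu_assign_pointer(pd_list, &big);	/* publish the list */

	printf("min capacity_orig: %lu\n", min_cap_orig());

	rcu_unregister_thread();
	return 0;
}

The design point the sketch is meant to show is only the division of responsibility: the helper may use rcu_dereference() freely, but correctness then depends entirely on every caller keeping the read-side section open around it, which is the assumption the hunk makes about update_cpu_capacity()'s call path.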