Revert "sched/fair: Fixes for capacity inversion detection"

This reverts commit 4735b6f74f.

It breaks the Android kernel ABI, so revert it.  If it needs to come
back later, it can do so in an ABI-safe way.

Bug: 161946584
Cc: Qais Yousef <qyousef@google.com>
Change-Id: I91e7a2222c06516f4df460603cc27cf7c1106823
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit e1be343429
parent 4c20c2c837
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2023-06-20 11:16:56 +00:00

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8545,23 +8545,16 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 *   * Thermal pressure will impact all cpus in this perf domain
 	 *     equally.
 	 */
-	if (sched_energy_enabled()) {
+	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd;
+		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
 
-		rcu_read_lock();
-
-		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
 			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
-			/* We can't be inverted against our own pd */
-			if (cpumask_test_cpu(cpu_of(rq), pd_span))
-				continue;
-
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8586,8 +8579,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
-
-		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);
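
For readers skimming the revert, the sketch below summarises how the block in
update_cpu_capacity() reads after the revert. It is only a rough reconstruction
from the two hunks above, not the complete function: rq, cpu and capacity_orig
are locals of the surrounding code, and the per-domain comparison against
inv_cap is elided.

/*
 * After the revert, the capacity-inversion scan is again gated on the
 * sched_asym_cpucapacity static key instead of sched_energy_enabled(),
 * the perf-domain walk no longer wraps itself in rcu_read_lock()/
 * rcu_read_unlock(), and it no longer skips the CPU's own perf domain.
 */
if (static_branch_unlikely(&sched_asym_cpucapacity)) {
	unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
	struct perf_domain *pd = rcu_dereference(rq->rd->pd);

	rq->cpu_capacity_inverted = 0;

	for (; pd; pd = pd->next) {
		/* Every perf domain is checked, including this CPU's own. */
		/* ... pd capacity vs. inv_cap comparison elided ... */
	}
}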