ANDROID: cpu/hotplug: add pause/resume_cpus interface

pause_cpus() provides a way to force a CPU to go idle, and to resume it,
as quickly as possible, with as little disruption to the system as
possible. This is a way of saving energy or meeting thermal constraints
for which a full CPU hotunplug is too slow. A paused CPU is simply
deactivated from the scheduler's point of view, which corresponds to the
first hotunplug step.

Each pause operation still needs some heavy synchronization. Allowing
several CPUs to be paused in one go mitigates that cost.

Paused CPUs can be resumed with resume_cpus(), which also takes a cpumask
as input.
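
Below is a minimal sketch of a hypothetical in-kernel caller; the helper
name and the choice of CPUs are illustrative only and not part of this
patch:

/*
 * Minimal sketch of a hypothetical caller. pause_cpus() filters the
 * mask against cpu_active_mask in place, so on return the mask only
 * holds the CPUs that were actually considered for pausing.
 */
static int throttle_cpus_2_and_3(void)
{
        cpumask_var_t mask;
        int err;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        /* CPUs 2 and 3 are chosen arbitrarily for the example */
        cpumask_set_cpu(2, mask);
        cpumask_set_cpu(3, mask);

        err = pause_cpus(mask);

        free_cpumask_var(mask);
        return err;
}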

A few limitations:

  * It isn't possible to pause a CPU that is running a SCHED_DEADLINE
    task; pause_cpus() returns -EBUSY in that case (see the sketch after
    this list).

  * A paused CPU will be removed from any cpuset it is part of. Resuming
    the CPU won't put it back in that cpuset when using cgroup1;
    cgroup2 doesn't have this limitation.

  * per-CPU kthreads are still allowed to run on a paused CPU.
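
Both functions return 0 on success or -EBUSY when the request can't be
honoured (hotplug disabled, an offline CPU in the mask, a SCHED_DEADLINE
task on a target CPU, or a pause that would leave no active CPU). A
hedged sketch of the resume side, with a hypothetical helper name:

/*
 * Hypothetical unthrottle helper. resume_cpus() also edits the mask
 * in place (already-active CPUs are dropped), so pass a scratch copy
 * if the original set must be kept.
 */
static void unthrottle_cpus(const struct cpumask *paused)
{
        cpumask_var_t scratch;

        if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
                return;

        cpumask_copy(scratch, paused);
        if (resume_cpus(scratch) == -EBUSY)
                pr_warn("resume_cpus: hotplug disabled or CPU offline\n");

        free_cpumask_var(scratch);
}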

Bug: 161210528
Change-Id: I1f5cb28190f8ec979bb8640a89b022f2f7266bcf
Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Todd Kjos <tkjos@google.com>
Authored by Vincent Donnefort on 2020-11-03 14:26:59 +00:00
Committed by Todd Kjos
parent 1734af6299
commit 683010f555
4 files changed, 175 insertions(+), 9 deletions(-)


@@ -1093,6 +1093,121 @@ int remove_cpu(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(remove_cpu);

extern bool dl_cpu_busy(unsigned int cpu);

int pause_cpus(struct cpumask *cpus)
{
        int err = 0;
        int cpu;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto err_cpu_maps_update;
        }

        /* Pausing an already inactive CPU isn't an error */
        cpumask_and(cpus, cpus, cpu_active_mask);

        for_each_cpu(cpu, cpus) {
                if (!cpu_online(cpu) || dl_cpu_busy(cpu)) {
                        err = -EBUSY;
                        goto err_cpu_maps_update;
                }
        }

        if (cpumask_weight(cpus) >= num_active_cpus()) {
                err = -EBUSY;
                goto err_cpu_maps_update;
        }

        if (cpumask_empty(cpus))
                goto err_cpu_maps_update;

        cpus_write_lock();

        cpuhp_tasks_frozen = 0;

        if (sched_cpus_deactivate_nosync(cpus)) {
                err = -EBUSY;
                goto err_cpus_write_unlock;
        }
        /*
         * Even if it lives on the side of the regular hotplug path, pause
         * uses one of the hotplug steps (CPUHP_AP_ACTIVE). This should be
         * reflected in the current state of the CPU.
         */
        for_each_cpu(cpu, cpus) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

                st->state = CPUHP_AP_ACTIVE - 1;
                st->target = st->state;
        }

err_cpus_write_unlock:
        cpus_write_unlock();
err_cpu_maps_update:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(pause_cpus);

int resume_cpus(struct cpumask *cpus)
{
        unsigned int cpu;
        int err = 0;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto err_cpu_maps_update;
        }

        /* Resuming an already active CPU isn't an error */
        cpumask_andnot(cpus, cpus, cpu_active_mask);

        for_each_cpu(cpu, cpus) {
                if (!cpu_online(cpu)) {
                        err = -EBUSY;
                        goto err_cpu_maps_update;
                }
        }

        if (cpumask_empty(cpus))
                goto err_cpu_maps_update;

        cpus_write_lock();

        cpuhp_tasks_frozen = 0;

        if (sched_cpus_activate(cpus)) {
                err = -EBUSY;
                goto err_cpus_write_unlock;
        }

        /*
         * See pause_cpus().
         */
        for_each_cpu(cpu, cpus) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

                st->state = CPUHP_ONLINE;
                st->target = st->state;
        }

err_cpus_write_unlock:
        cpus_write_unlock();
err_cpu_maps_update:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(resume_cpus);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
        unsigned int cpu;