
paused_cpus intends to force CPUs to go idle as quickly as possible by adding a migration step that drains the rq of any running task. Two steps are actually needed. The first one, "lazy", runs before cpu_active_mask has been synced; the second one runs after. It is possible for another CPU to observe an outdated version of that mask and to enqueue a task on a rq that has just been marked inactive. The second migration is there to catch any such spurious move, while the first one drains the rq as quickly as possible to let the CPU reach an idle state.

Bug: 161210528
Change-Id: Ie26c2e4c42665dd61d41a899a84536e56bf2b887
Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
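As a rough illustration of the ordering described above, the sketch below shows how a caller might sequence the interfaces declared in this header around the cpu_active_mask sync. The pause_cpus() machinery itself is not part of this change, so the function name pause_cpus_sketch(), the loop structure, and the use of synchronize_rcu() as the sync point are assumptions for illustration only, not the actual implementation.

#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/sched/hotplug.h>

/*
 * Hypothetical caller, NOT part of this patch: shows the intended
 * ordering of the two drain steps around the cpu_active_mask sync.
 */
static int pause_cpus_sketch(struct cpumask *cpus)
{
	int cpu, err;

	/* Mark the CPUs inactive, but do not sync the active mask yet. */
	err = sched_cpus_deactivate_nosync(cpus);
	if (err)
		return err;

	/*
	 * Step 1: "lazy" drain, before cpu_active_mask has been synced.
	 * Push the currently running tasks away so the CPUs can reach
	 * idle as quickly as possible.
	 */
	for_each_cpu(cpu, cpus)
		sched_cpu_drain_rq(cpu);

	/*
	 * Sync point (assumed here to be an RCU grace period): after this,
	 * remote CPUs can no longer act on a stale copy of the mask.
	 */
	synchronize_rcu();

	/*
	 * Step 2: second drain, after the sync. Catches any task that a
	 * remote CPU enqueued while it still observed the outdated mask.
	 */
	for_each_cpu(cpu, cpus) {
		sched_cpu_drain_rq(cpu);
		sched_cpu_drain_rq_wait(cpu);
	}

	return 0;
}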
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_HOTPLUG_H
#define _LINUX_SCHED_HOTPLUG_H

/*
 * Scheduler interfaces for hotplug CPU support:
 */

extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpus_activate(struct cpumask *cpus);
extern int sched_cpu_deactivate(unsigned int cpu);
extern int sched_cpus_deactivate_nosync(struct cpumask *cpus);
extern int sched_cpu_drain_rq(unsigned int cpu);
extern void sched_cpu_drain_rq_wait(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying	NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#endif /* _LINUX_SCHED_HOTPLUG_H */