diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 7f1478c26a33..a433f4adf62d 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -160,6 +160,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 	task_unlock(current);
 }
 
+extern void cpuset_hotplug_workfn(struct work_struct *work);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -275,6 +277,8 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
 	return false;
 }
 
+static inline void cpuset_hotplug_workfn(struct work_struct *work) {}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index cf643f3fd99a..9ac2a148bf48 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -342,9 +342,9 @@ static DEFINE_SPINLOCK(callback_lock);
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
 /*
- * CPU / memory hotplug is handled asynchronously.
+ * CPU / memory hotplug is handled asynchronously
+ * for hotplug, synchronously for resume_cpus
  */
-static void cpuset_hotplug_workfn(struct work_struct *work);
 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 
 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
@@ -3136,7 +3136,7 @@ update_tasks:
  * Note that CPU offlining during suspend is ignored. We don't modify
  * cpusets across suspend/resume cycles at all.
  */
-static void cpuset_hotplug_workfn(struct work_struct *work)
+void cpuset_hotplug_workfn(struct work_struct *work)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2bcc13dad8ea..e4b7df06bb64 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <linux/cpuset.h>
 #include
 
 #define CREATE_TRACE_POINTS
@@ -1253,6 +1254,15 @@ int resume_cpus(struct cpumask *cpus)
 	if (cpumask_empty(cpus))
 		goto err_cpu_maps_update;
 
+	for_each_cpu(cpu, cpus)
+		set_cpu_active(cpu, true);
+
+	/* Lazy Resume. Build domains immediately instead of scheduling
+	 * a workqueue. This is so that the cpu can pull load when
+	 * sent a load balancing kick.
+	 */
+	cpuset_hotplug_workfn(NULL);
+
 	cpus_write_lock();
 
 	cpuhp_tasks_frozen = 0;