ANDROID: cpu/hotplug: rebuild sched domains immediately

In the resume_cpus() path, cpus cannot be used until
the cpus write lock is acquired, the cpus are activated,
and the sched domains are rebuilt. This can incur
significant delay in the unpause operation.

Additionally, if the rebuild is scheduled through the
kworker thread, the wait for rebuilding the sched domains
can become large on a busy system that prevents the
kworker from running promptly.

Activate the cpus and call cpuset_hotplug_workfn()
directly within resume_cpus() prior to taking the cpus
write lock, thereby eliminating the delays associated
with scheduling this work.

Bug: 161210528
Change-Id: Ie2521f28ed9078b22d421d792f08413016d4dd62
Signed-off-by: Stephen Dickey <dickey@codeaurora.org>
Signed-off-by: Todd Kjos <tkjos@google.com>
3 changed files with 17 additions and 3 deletions
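
For context, a minimal sketch of how a caller might drive this
path. pause_cpus() as the pairing API, the CPU numbers, and the
function name are illustrative assumptions, not part of this change:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical caller: pause two CPUs, do some work, then unpause.
 * Before this change the resumed CPUs could not pull load until a
 * kworker rebuilt the sched domains; after it, resume_cpus()
 * rebuilds them synchronously before taking the cpus write lock.
 */
static int example_pause_resume(void)
{
	cpumask_t mask;	/* stack mask kept simple for illustration */
	int ret;

	cpumask_clear(&mask);
	cpumask_set_cpu(2, &mask);
	cpumask_set_cpu(3, &mask);

	ret = pause_cpus(&mask);	/* assumed counterpart of resume_cpus() */
	if (ret)
		return ret;

	/* ... CPUs 2-3 are inactive; the scheduler avoids them ... */

	return resume_cpus(&mask);	/* now rebuilds sched domains immediately */
}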

include/linux/cpuset.h

@@ -160,6 +160,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 	task_unlock(current);
 }
 
+extern void cpuset_hotplug_workfn(struct work_struct *work);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -275,6 +277,8 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
 	return false;
 }
 
+static inline void cpuset_hotplug_workfn(struct work_struct *work) {}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */

kernel/cgroup/cpuset.c

@@ -342,9 +342,9 @@ static DEFINE_SPINLOCK(callback_lock);
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
 /*
- * CPU / memory hotplug is handled asynchronously.
+ * CPU / memory hotplug is handled asynchronously
+ * for hotplug, synchronously for resume_cpus
  */
-static void cpuset_hotplug_workfn(struct work_struct *work);
 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 
 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
@@ -3136,7 +3136,7 @@ update_tasks:
  * Note that CPU offlining during suspend is ignored. We don't modify
  * cpusets across suspend/resume cycles at all.
  */
-static void cpuset_hotplug_workfn(struct work_struct *work)
+void cpuset_hotplug_workfn(struct work_struct *work)
 {
 	static cpumask_t new_cpus;
 	static nodemask_t new_mems;

kernel/cpu.c

@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/percpu-rwsem.h>
 #include <uapi/linux/sched/types.h>
+#include <linux/cpuset.h>
 #include <trace/events/power.h>
 
 #define CREATE_TRACE_POINTS
@@ -1253,6 +1254,15 @@ int resume_cpus(struct cpumask *cpus)
 	if (cpumask_empty(cpus))
 		goto err_cpu_maps_update;
 
+	for_each_cpu(cpu, cpus)
+		set_cpu_active(cpu, true);
+
+	/* Lazy Resume. Build domains immediately instead of scheduling
+	 * a workqueue. This is so that the cpu can pull load when
+	 * sent a load balancing kick.
+	 */
+	cpuset_hotplug_workfn(NULL);
+
 	cpus_write_lock();
 	cpuhp_tasks_frozen = 0;
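
Note that cpuset_hotplug_workfn() never dereferences its work
argument; as the kernel/cgroup/cpuset.c hunk above shows, it
operates on static new_cpus/new_mems state, which is what makes
the direct cpuset_hotplug_workfn(NULL) call here safe.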