From db3e01ecf0444a8b4a84e0c15607590ce8cc0b02 Mon Sep 17 00:00:00 2001
From: Stephen Dickey
Date: Thu, 14 Oct 2021 14:32:39 -0700
Subject: [PATCH] ANDROID: cpuhp: pause: reduce running priority pause/resume

In order to get a responsive pause operation, resume_cpus()/pause_cpus()
are usually called from a task running at RT priority. However, only the
lazy-resume and lazy-pause operations need to happen quickly; the rest
can happen at leisure. Running that remainder at high priority keeps the
cpu away from other important tasks.

Reduce the priority right after the lazy portion, and restore it before
returning, if the task was at RT priority.

Bug: 203115740
Fixes: 683010f555d8 ("ANDROID: cpu/hotplug: add pause/resume_cpus interface")
Change-Id: I1f3394eb9b5fa1876330fef6e25a203da0fde670
Signed-off-by: Stephen Dickey
---
 kernel/cpu.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 49d2eea2bd43..7d86553a0c2e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1136,11 +1136,37 @@ void __wait_drain_rq(struct cpumask *cpus)
 		sched_cpu_drain_rq_wait(cpu);
 }
 
+/* if rt task, set to cfs and return previous prio */
+static int pause_reduce_prio(void)
+{
+	int prev_prio = -1;
+
+	if (current->prio < MAX_RT_PRIO) {
+		struct sched_param param = { .sched_priority = 0 };
+
+		prev_prio = current->prio;
+		sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+	}
+
+	return prev_prio;
+}
+
+/* if previous prio was set, restore */
+static void pause_restore_prio(int prev_prio)
+{
+	if (prev_prio >= 0 && prev_prio < MAX_RT_PRIO) {
+		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1-prev_prio };
+
+		sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	}
+}
+
 int pause_cpus(struct cpumask *cpus)
 {
 	int err = 0;
 	int cpu;
 	u64 start_time = 0;
+	int prev_prio;
 
 	start_time = sched_clock();
 
@@ -1195,6 +1221,8 @@ int pause_cpus(struct cpumask *cpus)
 		goto err_cpu_maps_update;
 	}
 
+	prev_prio = pause_reduce_prio();
+
 	/*
 	 * Slow path deactivation:
 	 *
@@ -1238,6 +1266,7 @@ int pause_cpus(struct cpumask *cpus)
 
 err_cpus_write_unlock:
 	cpus_write_unlock();
+	pause_restore_prio(prev_prio);
 err_cpu_maps_update:
 	cpu_maps_update_done();
 
@@ -1252,6 +1281,7 @@ int resume_cpus(struct cpumask *cpus)
 	unsigned int cpu;
 	int err = 0;
 	u64 start_time = 0;
+	int prev_prio;
 
 	start_time = sched_clock();
 
@@ -1282,6 +1312,8 @@ int resume_cpus(struct cpumask *cpus)
 	if (err)
 		goto err_cpu_maps_update;
 
+	prev_prio = pause_reduce_prio();
+
 	/* Lazy Resume. Build domains immediately instead of scheduling
 	 * a workqueue. This is so that the cpu can pull load when
 	 * sent a load balancing kick.
 	 */
@@ -1309,6 +1341,7 @@ int resume_cpus(struct cpumask *cpus)
 
 err_cpus_write_unlock:
 	cpus_write_unlock();
+	pause_restore_prio(prev_prio);
 err_cpu_maps_update:
 	cpu_maps_update_done();
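
Note for readers outside the kernel tree: the drop-and-restore pattern used
by pause_reduce_prio()/pause_restore_prio() above can be sketched in plain
userspace C with sched_setscheduler(2). This is only an illustrative
analogue, not part of the patch; run_slow_path() is a hypothetical
stand-in for the non-urgent portion of the work.

    #include <sched.h>

    /* Hypothetical placeholder for the non-urgent work. */
    static void run_slow_path(void)
    {
    }

    int main(void)
    {
            struct sched_param param = { 0 };
            int prev_policy, prev_prio = -1;

            /* If running as an RT task, remember policy/prio, drop to CFS. */
            prev_policy = sched_getscheduler(0);
            if (prev_policy == SCHED_FIFO || prev_policy == SCHED_RR) {
                    if (sched_getparam(0, &param) == 0)
                            prev_prio = param.sched_priority;
                    param.sched_priority = 0;
                    sched_setscheduler(0, SCHED_OTHER, &param);
            }

            run_slow_path();

            /* Restore the original RT policy and priority, if dropped. */
            if (prev_prio >= 0) {
                    param.sched_priority = prev_prio;
                    sched_setscheduler(0, prev_policy, &param);
            }

            return 0;
    }

The kernel-side helpers differ in that they key off current->prio (where
lower values mean higher priority, hence the MAX_RT_PRIO-1-prev_prio
conversion back to a sched_priority) and always restore as SCHED_FIFO.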