sched/walt: take RCU read lock before using task's cgroups

The uclamp_latency_sensitive() function dereferences the
cgroups member of struct task_struct, so the RCU read lock
must be held before calling it.
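
For reference, a rough sketch of why the RCU read lock is needed
(the real helper lives elsewhere in the tree; resolving the task
group via task_css() is the standard pattern, but the
latency_sensitive field name is assumed here for illustration):

  /*
   * Sketch only: looking up the task's cpu-cgroup task_group walks
   * RCU-protected cgroup pointers, so the caller must be inside an
   * rcu_read_lock()/rcu_read_unlock() section.
   */
  static inline bool uclamp_latency_sensitive(struct task_struct *p)
  {
          struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id);
          struct task_group *tg;

          if (!css)
                  return false;
          tg = container_of(css, struct task_group, css);

          return tg->latency_sensitive; /* field name assumed */
  }

Hence the call is moved inside the rcu_read_lock() section of
walt_find_energy_efficient_cpu() below.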

Change-Id: I1988ed7fe836f9f1ba99d59c5d46f26f3418b51e
Signed-off-by: Tengfei Fan <quic_tengfan@quicinc.com>

@@ -751,7 +751,7 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
         cpumask_t *candidates;
         bool is_rtg, curr_is_rtg;
         struct find_best_target_env fbt_env;
-        bool need_idle = wake_to_idle(p) || uclamp_latency_sensitive(p);
+        bool need_idle = wake_to_idle(p);
         u64 start_t = 0;
         int delta = 0;
         int task_boost = per_task_boost(p);
@@ -775,9 +775,6 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
         is_rtg = task_in_related_thread_group(p);
         curr_is_rtg = task_in_related_thread_group(cpu_rq(cpu)->curr);
 
-        fbt_env.fastpath = 0;
-        fbt_env.need_idle = need_idle;
-
         if (trace_sched_task_util_enabled())
                 start_t = sched_clock();
@@ -785,6 +782,12 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
         candidates = this_cpu_ptr(&energy_cpus);
         cpumask_clear(candidates);
 
+        rcu_read_lock();
+        need_idle |= uclamp_latency_sensitive(p);
+
+        fbt_env.fastpath = 0;
+        fbt_env.need_idle = need_idle;
+
         if (sync && (need_idle || (is_rtg && curr_is_rtg)))
                 sync = 0;
 
@@ -792,10 +795,9 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
                         && bias_to_this_cpu(p, cpu, start_cpu)) {
                 best_energy_cpu = cpu;
                 fbt_env.fastpath = SYNC_WAKEUP;
-                goto done;
+                goto unlock;
         }
 
-        rcu_read_lock();
         pd = rcu_dereference(rd->pd);
         if (!pd)
                 goto fail;
@@ -902,7 +904,6 @@ int walt_find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
 unlock:
         rcu_read_unlock();
 
-done:
         trace_sched_task_util(p, cpumask_bits(candidates)[0], best_energy_cpu,
                         sync, fbt_env.need_idle, fbt_env.fastpath,
                         start_t, uclamp_boost, start_cpu);