sched/core: Provide a pointer to the valid CPU mask
In commit:

  4b53a3412d ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper")
the tsk_nr_cpus_allowed() wrapper was removed. There was not much
difference in !RT, but in RT we used this to implement
migrate_disable(). Within a migrate_disable() section the CPU mask is
restricted to a single CPU while the "normal" CPU mask remains
untouched.
As an alternative implementation, Ingo suggested using:

	struct task_struct {
		const cpumask_t	*cpus_ptr;
		cpumask_t	cpus_mask;
	};

with

	t->cpus_ptr = &t->cpus_mask;

In -RT we can then switch cpus_ptr to:

	t->cpus_ptr = &cpumask_of(task_cpu(p));
in a migration-disabled region. The rules are simple:

- Code that 'uses' ->cpus_allowed would use the pointer.
- Code that 'modifies' ->cpus_allowed would use the direct mask.
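
To make the rules concrete, here is a minimal, self-contained userspace
sketch of the scheme. The toy cpumask type and all helper names below
are invented for illustration and are not the kernel's real API; only
the pointer-switching idea mirrors the actual change:

/* Toy model of the cpus_ptr/cpus_mask split described above. */
#include <assert.h>
#include <stdio.h>

#define NR_CPUS 8

typedef struct { unsigned long bits; } cpumask_t;   /* toy cpumask */

struct task_struct {
    const cpumask_t *cpus_ptr;  /* code that 'uses' the mask reads this */
    cpumask_t cpus_mask;        /* code that 'modifies' it writes this */
    int cpu;                    /* CPU the task currently runs on */
};

/* One single-CPU mask per CPU, analogous to the kernel's cpumask_of(). */
static cpumask_t single_cpu_mask[NR_CPUS];

static void init_task(struct task_struct *t, unsigned long allowed, int cpu)
{
    t->cpus_mask.bits = allowed;
    t->cpus_ptr = &t->cpus_mask;        /* the !RT invariant */
    t->cpu = cpu;
}

/* A 'uses' site: always goes through the pointer, as in the hunks below. */
static int task_can_run_on(const struct task_struct *t, int cpu)
{
    return !!(t->cpus_ptr->bits & (1UL << cpu));
}

/* A 'modifies' site: updates the real mask; the pointer is left alone. */
static void set_cpus_allowed(struct task_struct *t, unsigned long allowed)
{
    t->cpus_mask.bits = allowed;
}

/*
 * What -RT's migrate_disable()/migrate_enable() would do with the
 * pointer (heavily simplified; the real thing also deals with nesting,
 * preemption and the scheduler proper).
 */
static void migrate_disable(struct task_struct *t)
{
    t->cpus_ptr = &single_cpu_mask[t->cpu]; /* pinned to the current CPU */
}

static void migrate_enable(struct task_struct *t)
{
    t->cpus_ptr = &t->cpus_mask;            /* back to the full mask */
}

int main(void)
{
    struct task_struct t;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        single_cpu_mask[cpu].bits = 1UL << cpu;

    init_task(&t, 0x0fUL, 2);           /* allowed on CPUs 0-3, on CPU 2 */
    assert(task_can_run_on(&t, 3));

    migrate_disable(&t);
    assert(!task_can_run_on(&t, 3));    /* readers now see only CPU 2 */
    set_cpus_allowed(&t, 0xffUL);       /* writers still hit cpus_mask */

    migrate_enable(&t);
    assert(task_can_run_on(&t, 7));     /* the update is visible again */

    printf("cpus_ptr sketch OK\n");
    return 0;
}

The payoff is that 'uses' sites need no extra branch for the
migrate-disabled case: dereferencing ->cpus_ptr is always correct,
while updates to ->cpus_mask are never lost while the pointer is
switched aside.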
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190423142636.14347-1-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by: Ingo Molnar
Parent:       f2c7c76c5d
Commit:       3bd3706251

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1621,7 +1621,7 @@ static void task_numa_compare(struct task_numa_env *env,
          * be incurred if the tasks were swapped.
          */
         /* Skip this swap candidate if cannot move to the source cpu */
-        if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+        if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
                 goto unlock;
 
         /*
@@ -1718,7 +1718,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
 
         for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                 /* Skip this CPU if the source task cannot migrate */
-                if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+                if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
                         continue;
 
                 env->dst_cpu = cpu;
@@ -5831,7 +5831,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                 /* Skip over this group if it has no CPUs allowed */
                 if (!cpumask_intersects(sched_group_span(group),
-                                        &p->cpus_allowed))
+                                        p->cpus_ptr))
                         continue;
 
                 local_group = cpumask_test_cpu(this_cpu,
@@ -5963,7 +5963,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
                 return cpumask_first(sched_group_span(group));
 
         /* Traverse only the allowed CPUs */
-        for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+        for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
                 if (available_idle_cpu(i)) {
                         struct rq *rq = cpu_rq(i);
                         struct cpuidle_state *idle = idle_get_state(rq);
@@ -6003,7 +6003,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 {
         int new_cpu = cpu;
 
-        if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+        if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
                 return prev_cpu;
 
         /*
@@ -6120,7 +6120,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
         if (!test_idle_cores(target, false))
                 return -1;
 
-        cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+        cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
         for_each_cpu_wrap(core, cpus, target) {
                 bool idle = true;
@@ -6154,7 +6154,7 @@ static int select_idle_smt(struct task_struct *p, int target)
                 return -1;
 
         for_each_cpu(cpu, cpu_smt_mask(target)) {
-                if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+                if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                         continue;
                 if (available_idle_cpu(cpu))
                         return cpu;
@@ -6217,7 +6217,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
         for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
                 if (!--nr)
                         return -1;
-                if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+                if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                         continue;
                 if (available_idle_cpu(cpu))
                         break;
@@ -6254,7 +6254,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
             recent_used_cpu != target &&
             cpus_share_cache(recent_used_cpu, target) &&
             available_idle_cpu(recent_used_cpu) &&
-            cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+            cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
                 /*
                  * Replace recent_used_cpu with prev as it is a potential
                  * candidate for the next wake:
@@ -6600,7 +6600,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                 int max_spare_cap_cpu = -1;
 
                 for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
-                        if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+                        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                                 continue;
 
                         /* Skip CPUs that will be overutilized. */
@@ -6689,7 +6689,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                 }
 
                 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
-                              cpumask_test_cpu(cpu, &p->cpus_allowed);
+                              cpumask_test_cpu(cpu, p->cpus_ptr);
         }
 
         rcu_read_lock();
@@ -7445,14 +7445,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         /*
          * We do not migrate tasks that are:
          * 1) throttled_lb_pair, or
-         * 2) cannot be migrated to this CPU due to cpus_allowed, or
+         * 2) cannot be migrated to this CPU due to cpus_ptr, or
          * 3) running (obviously), or
          * 4) are cache-hot on their current CPU.
          */
         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
                 return 0;
 
-        if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+        if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
                 int cpu;
 
                 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -7472,7 +7472,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 
                 /* Prevent to re-select dst_cpu via env's CPUs: */
                 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-                        if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+                        if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
                                 env->flags |= LBF_DST_PINNED;
                                 env->new_dst_cpu = cpu;
                                 break;
@@ -8099,7 +8099,7 @@ static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -8768,7 +8768,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
         /*
          * If the busiest group is imbalanced the below checks don't
          * work because they assume all things are equal, which typically
-         * isn't true due to cpus_allowed constraints and the like.
+         * isn't true due to cpus_ptr constraints and the like.
          */
         if (busiest->group_type == group_imbalanced)
                 goto force_balance;
@@ -9210,7 +9210,7 @@ more_balance:
                          * if the curr task on busiest CPU can't be
                          * moved to this_cpu:
                          */
-                        if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+                        if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
                                 raw_spin_unlock_irqrestore(&busiest->lock,
                                                            flags);
                                 env.flags |= LBF_ALL_PINNED;