sched: Wrap scheduler p->cpus_allowed access
This task is preparatory for the migrate_disable() implementation, but stands on its own and provides a cleanup.

It currently only converts those sites required for task placement. Kosaki-san once mentioned replacing cpus_allowed with a proper cpumask_t instead of the NR_CPUS-sized array it currently is; such a change would also require something like this.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Link: http://lkml.kernel.org/n/tip-e42skvaddos99psip0vce41o@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
committed by Ingo Molnar
parent 6eb57e0d65
commit fa17b507f1
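The hunks below switch callers to a tsk_cpus_allowed() helper whose definition is not part of this excerpt. Presumably it is a thin accessor added to include/linux/sched.h, roughly along these lines (a sketch of the expected shape, not the verbatim hunk):

    /* Future-safe accessor for struct task_struct's cpus_allowed. */
    #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

With that in place, callers write cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) instead of dereferencing p->cpus_allowed directly, so the field's representation can later change in a single spot.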
@@ -1179,7 +1179,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -1324,7 +1324,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 */
 		if (unlikely(task_rq(task) != rq ||
 			     !cpumask_test_cpu(lowest_rq->cpu,
-					       &task->cpus_allowed) ||
+					       tsk_cpus_allowed(task)) ||
 			     task_running(rq, task) ||
 			     !task->on_rq)) {
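To illustrate the design choice rather than the kernel code itself: once every reader goes through the accessor, the mask's representation (for example the cpumask_t replacement mentioned in the changelog, or whatever migrate_disable() ends up needing) can change without touching the call sites. A minimal user-space sketch of the same pattern, with hypothetical names (mini_task, mini_cpus_allowed, mini_test_cpu) and a plain unsigned long standing in for cpumask_t:

    #include <stdio.h>

    struct mini_task {
            unsigned long cpus_allowed;     /* representation may change later */
    };

    /* Accessor in the style of tsk_cpus_allowed(): callers never touch the field. */
    #define mini_cpus_allowed(t)    (&(t)->cpus_allowed)

    static int mini_test_cpu(int cpu, const unsigned long *mask)
    {
            return (int)((*mask >> cpu) & 1UL);
    }

    int main(void)
    {
            struct mini_task p = { .cpus_allowed = 0x5UL }; /* CPUs 0 and 2 allowed */

            /* Same call shape as cpumask_test_cpu(cpu, tsk_cpus_allowed(p)). */
            printf("cpu 1 allowed: %d\n", mini_test_cpu(1, mini_cpus_allowed(&p)));
            printf("cpu 2 allowed: %d\n", mini_test_cpu(2, mini_cpus_allowed(&p)));
            return 0;
    }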