sched: Transform resched_task() into resched_curr()
We always call resched_task() with rq->curr as its argument;
it is not possible to reschedule any task but the rq's current one.
This patch introduces resched_curr(struct rq *) to replace all of
the repeated resched_task(rq->curr) patterns. The main aim is
cleanup, but there is a small size benefit too:
(before)
$ size kernel/sched/built-in.o
text data bss dec hex filename
155274 16445 7042 178761 2ba49 kernel/sched/built-in.o
$ size vmlinux
text data bss dec hex filename
7411490 1178376 991232 9581098 92322a vmlinux
(after)
$ size kernel/sched/built-in.o
text data bss dec hex filename
155130 16445 7042 178617 2b9b9 kernel/sched/built-in.o
$ size vmlinux
text data bss dec hex filename
7411362 1178376 991232 9580970 9231aa vmlinux
I was choosing between resched_curr() and resched_rq(),
and the first name looks better to me.
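
For illustration only, a minimal sketch of the change at a call site; the
snippet below is hypothetical, not taken from the patch, and assumes the
caller holds rq->lock in both variants:

        /* before: the caller had to name the task, which was always rq->curr */
        resched_task(rq->curr);

        /* after: the runqueue alone identifies the task to reschedule */
        resched_curr(rq);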
There is a small inaccuracy in Documentation/trace/ftrace.txt: I have
not actually collected the tracing output again, in the hope that the
patch won't make execution times much worse. :)
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20140628200219.1778.18735.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 466af29bf4
commit 8875125efe
@@ -589,30 +589,31 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+        struct task_struct *curr = rq->curr;
         int cpu;
 
-        lockdep_assert_held(&task_rq(p)->lock);
+        lockdep_assert_held(&rq->lock);
 
-        if (test_tsk_need_resched(p))
+        if (test_tsk_need_resched(curr))
                 return;
 
-        cpu = task_cpu(p);
+        cpu = cpu_of(rq);
 
         if (cpu == smp_processor_id()) {
-                set_tsk_need_resched(p);
+                set_tsk_need_resched(curr);
                 set_preempt_need_resched();
                 return;
         }
 
-        if (set_nr_and_not_polling(p))
+        if (set_nr_and_not_polling(curr))
                 smp_send_reschedule(cpu);
         else
                 trace_sched_wake_idle_without_ipi(cpu);
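
For readability, here is resched_curr() as it reads once this hunk is
applied, assembled from the '+' and context lines above; the two comments
are editorial additions, not part of the source:

void resched_curr(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        int cpu;

        lockdep_assert_held(&rq->lock);

        if (test_tsk_need_resched(curr))
                return;

        cpu = cpu_of(rq);

        if (cpu == smp_processor_id()) {
                /* Local CPU: set the resched flags directly, no IPI needed. */
                set_tsk_need_resched(curr);
                set_preempt_need_resched();
                return;
        }

        /* Remote CPU: send an IPI unless the target is polling need_resched. */
        if (set_nr_and_not_polling(curr))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
}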
@@ -625,7 +626,7 @@ void resched_cpu(int cpu)
 
         if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                 return;
-        resched_task(cpu_curr(cpu));
+        resched_curr(rq);
         raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1027,7 +1028,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                         if (class == rq->curr->sched_class)
                                 break;
                         if (class == p->sched_class) {
-                                resched_task(rq->curr);
+                                resched_curr(rq);
                                 break;
                         }
                 }
@@ -3073,7 +3074,7 @@ void set_user_nice(struct task_struct *p, long nice)
                  * lowered its priority, then reschedule its CPU:
                  */
                 if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                        resched_task(rq->curr);
+                        resched_curr(rq);
         }
 out_unlock:
         task_rq_unlock(rq, p, &flags);
@@ -4299,7 +4300,7 @@ again:
                  * fairness.
                  */
                 if (preempt && rq != p_rq)
-                        resched_task(p_rq->curr);
+                        resched_curr(p_rq);
         }
 
 out_unlock:
@@ -7106,7 +7107,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
         __setscheduler(rq, p, &attr);
         if (on_rq) {
                 enqueue_task(rq, p, 0);
-                resched_task(rq->curr);
+                resched_curr(rq);
         }
 
         check_class_changed(rq, p, prev_class, old_prio);