sched: Transform resched_task() into resched_curr()
We always use resched_task() with rq->curr as the argument;
it is not possible to reschedule any task other than the rq's
current one. This patch introduces resched_curr(struct rq *) to
replace the repeating resched_task(rq->curr) pattern. The main
aim is cleanup, but there is also a small size win:
(before)
$ size kernel/sched/built-in.o
text data bss dec hex filename
155274 16445 7042 178761 2ba49 kernel/sched/built-in.o
$ size vmlinux
text data bss dec hex filename
7411490 1178376 991232 9581098 92322a vmlinux
(after)
$ size kernel/sched/built-in.o
text data bss dec hex filename
155130 16445 7042 178617 2b9b9 kernel/sched/built-in.o
$ size vmlinux
text data bss dec hex filename
7411362 1178376 991232 9580970 9231aa vmlinux
I was choosing between resched_curr() and resched_rq(),
and the first name looks better to me.
A small white lie in Documentation/trace/ftrace.txt: I have not
actually collected the traces there again, in the hope that the
patch won't make execution times much worse :)
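
For context, a minimal sketch of what the new helper looks like. This
is a simplified illustration, not the exact implementation: the
remote-CPU path (deciding between setting the polling flag and sending
a reschedule IPI) is elided here.

/*
 * Simplified sketch of resched_curr(). Since the task to mark for
 * rescheduling is always rq->curr, callers only need to pass the rq.
 * The remote-CPU path (set_nr_and_not_polling() + IPI) is elided.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	lockdep_assert_held(&rq->lock);

	/* Nothing to do if TIF_NEED_RESCHED is already set. */
	if (test_tsk_need_resched(curr))
		return;

	if (cpu_of(rq) == smp_processor_id()) {
		/* Local CPU: set the flag and the preempt-count hint. */
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	/* Remote CPU: would kick it with smp_send_reschedule() (elided). */
}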
Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20140628200219.1778.18735.stgit@localhost
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2923,7 +2923,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 	if (delta_exec > ideal_runtime) {
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
@@ -2947,7 +2947,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		return;
 
 	if (delta > ideal_runtime)
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 }
 
 static void
@@ -3087,7 +3087,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 * validating it and just reschedule.
 	 */
 	if (queued) {
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 		return;
 	}
 	/*
@@ -3278,7 +3278,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-		resched_task(rq_of(cfs_rq)->curr);
+		resched_curr(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -3438,7 +3438,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
-		resched_task(rq->curr);
+		resched_curr(rq);
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -3897,7 +3897,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
 	if (delta < 0) {
 		if (rq->curr == p)
-			resched_task(p);
+			resched_curr(rq);
 		return;
 	}
 
@@ -4766,7 +4766,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 		return;
 
 preempt:
-	resched_task(curr);
+	resched_curr(rq);
 	/*
 	 * Only set the backward buddy when the current task is still
 	 * on the rq. This can happen when a wakeup gets interleaved
@@ -7457,7 +7457,7 @@ static void task_fork_fair(struct task_struct *p)
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
-		resched_task(rq->curr);
+		resched_curr(rq);
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
@@ -7482,7 +7482,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 	 */
 	if (rq->curr == p) {
 		if (p->prio > oldprio)
-			resched_task(rq->curr);
+			resched_curr(rq);
 	} else
 		check_preempt_curr(rq, p, 0);
 }
@@ -7545,7 +7545,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	 * if we can still preempt the current task.
 	 */
 	if (rq->curr == p)
-		resched_task(rq->curr);
+		resched_curr(rq);
 	else
 		check_preempt_curr(rq, p, 0);
 }