sched: uninline scheduler
* save ~300 bytes
* activate_idle_task() was moved to avoid a warning

bloat-o-meter output:

add/remove: 6/0 grow/shrink: 0/16 up/down: 438/-733 (-295)   <===
function                       old   new   delta
__enqueue_entity                 -   165    +165
finish_task_switch               -   110    +110
update_curr_rt                   -    79     +79
__load_balance_iterator          -    32     +32
__task_rq_unlock                 -    28     +28
find_process_by_pid              -    24     +24
do_sched_setscheduler          133   123     -10
sys_sched_rr_get_interval      176   165     -11
sys_sched_getparam             156   145     -11
normalize_rt_tasks             482   470     -12
sched_getaffinity              112    99     -13
sys_sched_getscheduler          86    72     -14
sched_setaffinity              226   212     -14
sched_setscheduler             666   642     -24
load_balance_start_fair         33     9     -24
load_balance_next_fair          33     9     -24
dequeue_task_rt                133    67     -66
put_prev_task_rt                97    28     -69
schedule_tail                  133    50     -83
schedule                       682   594     -88
enqueue_entity                 499   366    -133
task_new_fair                  317   180    -137

Signed-off-by: Alexey Dobriyan <adobriyan@sw.ru>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
committed by Ingo Molnar
parent 155bb293ae
commit a9957449b0
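The size table in the commit message comes from the kernel's scripts/bloat-o-meter, which compares symbol sizes between two builds; a typical invocation is ./scripts/bloat-o-meter vmlinux.before vmlinux.after, where the two file names are just placeholders for the pre- and post-patch images.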
@@ -892,7 +892,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * achieve that by always pre-iterating before returning
  * the current task:
  */
-static inline struct task_struct *
+static struct task_struct *
 __load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
 {
 	struct task_struct *p;
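For readers unfamiliar with the pattern: a static inline helper called from several places typically gets its body copied into every caller, so dropping the inline hint (as the hunk above does for __load_balance_iterator) lets the compiler keep one shared out-of-line copy, which is where the text savings come from. A minimal, self-contained C sketch of the idea follows; the function names are hypothetical and not taken from this patch.

/*
 * Illustrative only: helper_step() stands in for a helper such as
 * __load_balance_iterator(); the two callers stand in for
 * load_balance_start_fair()/load_balance_next_fair().
 */
#include <stdio.h>

static int helper_step(int x)	/* before the change this would read "static inline int ..." */
{
	return x * 31 + 7;
}

int iterate_start(int x) { return helper_step(x) + 1; }
int iterate_next(int x)  { return helper_step(x) + 2; }

int main(void)
{
	/* Without the inline hint the compiler is free to keep a single
	 * helper_step() copy that both callers share, rather than
	 * duplicating its body at each call site. */
	printf("%d %d\n", iterate_start(3), iterate_next(3));
	return 0;
}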