sched: group scheduling, change how cpu load is calculated
This patch changes how the cpu load exerted by fair_sched_class tasks
is calculated. Load exerted by fair_sched_class tasks on a cpu is now
a summation of the group weights, rather than a summation of task
weights. The weight exerted by a group on a cpu depends on the shares
allocated to it.

This version of the patch has a minor impact on code size, but should
have no runtime/functional impact for !CONFIG_FAIR_GROUP_SCHED.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 58e2d4ca58
parent ec2c507fe8
committed by Ingo Molnar
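
Before the diff, a toy sketch (plain C, not kernel code; every identifier
below is hypothetical) may help contrast the two accounting schemes. It
assumes, purely for illustration, that a group's weight on a cpu is its
allocated shares scaled by the fraction of the group's task weight running
there; the real calculation is done by the group-scheduling code this
patch reworks.

/*
 * Toy illustration only -- not the kernel implementation. Contrasts
 * the old per-task accounting with per-group accounting, under the
 * assumption stated above about how shares map to per-cpu weight.
 */
#include <stdio.h>

#define NICE_0_LOAD 1024UL	/* weight of one nice-0 task */

struct toy_task { unsigned long weight; };

struct toy_group {
	unsigned long shares;		/* shares allocated to the group */
	unsigned long weight_on_cpu;	/* member task weight on this cpu */
	unsigned long weight_total;	/* member task weight on all cpus */
};

/* Old scheme: cpu load is a plain sum of runnable task weights. */
static unsigned long cpu_load_tasks(const struct toy_task *t, int n)
{
	unsigned long load = 0;

	for (int i = 0; i < n; i++)
		load += t[i].weight;
	return load;
}

/*
 * New scheme (sketch): the group as a whole exerts a weight derived
 * from its shares, independent of how many tasks it contains.
 */
static unsigned long cpu_load_group(const struct toy_group *g)
{
	return g->shares * g->weight_on_cpu / g->weight_total;
}

int main(void)
{
	/*
	 * Two nice-0 tasks of a group run on this cpu; the group has
	 * two more tasks elsewhere and was allocated 1024 shares.
	 */
	struct toy_task t[2] = { { NICE_0_LOAD }, { NICE_0_LOAD } };
	struct toy_group g = { 1024, 2 * NICE_0_LOAD, 4 * NICE_0_LOAD };

	printf("per-task accounting:  %lu\n", cpu_load_tasks(t, 2)); /* 2048 */
	printf("per-group accounting: %lu\n", cpu_load_group(&g));   /* 512 */
	return 0;
}

In this sketch, per-task accounting always charges 2048 for the two
nice-0 tasks, however many siblings they have, while per-group accounting
bounds the group's contribution by its shares, so spawning more tasks
cannot inflate the load a group exerts on a cpu.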
@@ -886,6 +886,16 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+	update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+	update_load_sub(&rq->load, load);
+}
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -896,26 +906,14 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
-	update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
-	update_load_sub(&rq->load, p->se.load.weight);
-}
-
 static void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running++;
-	inc_load(rq, p);
 }
 
 static void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
 	rq->nr_running--;
-	dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -4087,10 +4085,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		goto out_unlock;
 	}
 	on_rq = p->se.on_rq;
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		dec_load(rq, p);
-	}
 
 	p->static_prio = NICE_TO_PRIO(nice);
 	set_load_weight(p);
@@ -4100,7 +4096,6 @@ void set_user_nice(struct task_struct *p, long nice)
 
 	if (on_rq) {
 		enqueue_task(rq, p, 0);
-		inc_load(rq, p);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU: