sched: Refactor update_shares_cpu() -> update_blocked_avgs()
Now that running entities maintain their own load-averages the work
we must do in update_shares() is largely restricted to the periodic
decay of blocked entities. This allows us to be a little less
pessimistic regarding our occupancy on rq->lock and the associated
rq->clock updates required.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141507.133999170@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
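For background, the "periodic decay of blocked entities" above refers to the per-entity load tracking introduced earlier in this series: a blocked entity's load contribution decays geometrically, with y chosen so that y^32 = 1/2, i.e. the contribution roughly halves every 32 periods (about 32ms). A minimal fixed-point sketch of that idea follows; the helper name decay_blocked_load and the 4008/4096 approximation of y are ours for illustration, not the kernel's (the kernel uses precomputed inverse tables):

	#include <stdint.h>

	/*
	 * Sketch only: geometric decay of a blocked-load sum, assuming
	 * the series' y^32 = 1/2 convention. y ~= 0.97857 is
	 * approximated here as 4008/4096 in fixed point.
	 */
	static uint64_t decay_blocked_load(uint64_t load, unsigned int periods)
	{
		load >>= periods / 32;		/* whole half-lives are shifts */
		periods %= 32;
		while (periods--)		/* remainder: multiply by y */
			load = (load * 4008) >> 12;
		return load;
	}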
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3639,20 +3639,15 @@ next:
 /*
  * update tg->load_weight by folding this cpu's load_avg
  */
-static int update_shares_cpu(struct task_group *tg, int cpu)
+static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
 {
-	struct sched_entity *se;
-	struct cfs_rq *cfs_rq;
-	unsigned long flags;
-	struct rq *rq;
-
-	rq = cpu_rq(cpu);
-	se = tg->se[cpu];
-	cfs_rq = tg->cfs_rq[cpu];
+	struct sched_entity *se = tg->se[cpu];
+	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	/* throttled entities do not contribute to load */
+	if (throttled_hierarchy(cfs_rq))
+		return;
 
-	update_rq_clock(rq);
 	update_cfs_rq_blocked_load(cfs_rq, 1);
 
 	if (se) {
@@ -3669,32 +3664,33 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
 			list_del_leaf_cfs_rq(cfs_rq);
 	} else {
+		struct rq *rq = rq_of(cfs_rq);
 		update_rq_runnable_avg(rq, rq->nr_running);
 	}
-
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return 0;
 }
 
-static void update_shares(int cpu)
+static void update_blocked_averages(int cpu)
 {
-	struct cfs_rq *cfs_rq;
 	struct rq *rq = cpu_rq(cpu);
+	struct cfs_rq *cfs_rq;
+	unsigned long flags;
 
-	rcu_read_lock();
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
 	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		/* throttled entities do not contribute to load */
-		if (throttled_hierarchy(cfs_rq))
-			continue;
-
-		update_shares_cpu(cfs_rq->tg, cpu);
+		/*
+		 * Note: We may want to consider periodically releasing
+		 * rq->lock about these updates so that creating many task
+		 * groups does not result in continually extending hold time.
+		 */
+		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
 	}
-	rcu_read_unlock();
+
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -3746,7 +3742,7 @@ static unsigned long task_h_load(struct task_struct *p)
 	return load;
 }
 #else
-static inline void update_shares(int cpu)
+static inline void update_blocked_averages(int cpu)
 {
 }
 
@@ -4813,7 +4809,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 	 */
 	raw_spin_unlock(&this_rq->lock);
 
-	update_shares(this_cpu);
+	update_blocked_averages(this_cpu);
 	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -5068,7 +5064,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	int update_next_balance = 0;
 	int need_serialize;
 
-	update_shares(cpu);
+	update_blocked_averages(cpu);
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
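The net structural change is visible in the first two hunks: the old update_shares_cpu() acquired rq->lock and re-read rq->clock once per task group, while the new update_blocked_averages() takes the lock and updates the clock once around the whole leaf-cfs_rq walk, at the cost of a longer single hold time (hence the in-code note about periodically releasing the lock). A schematic, self-contained sketch of that before/after pattern, with stand-in types and a toy decay in place of the kernel helpers:

	#include <pthread.h>

	struct rq {
		pthread_mutex_t lock;	/* stands in for rq->lock */
		unsigned long clock;	/* stands in for rq->clock */
	};

	struct cfs_rq {
		struct cfs_rq *next;	/* stands in for the leaf_cfs_rq list */
		unsigned long blocked_load;
	};

	static void decay_one(struct cfs_rq *c)
	{
		c->blocked_load -= c->blocked_load >> 5;	/* toy decay */
	}

	/* Before: lock acquired and clock updated once per task group. */
	static void walk_old(struct rq *rq, struct cfs_rq *leaf)
	{
		for (struct cfs_rq *c = leaf; c; c = c->next) {
			pthread_mutex_lock(&rq->lock);
			rq->clock++;		/* update_rq_clock() per group */
			decay_one(c);
			pthread_mutex_unlock(&rq->lock);
		}
	}

	/* After: one acquisition and one clock update amortized over the walk. */
	static void walk_new(struct rq *rq, struct cfs_rq *leaf)
	{
		pthread_mutex_lock(&rq->lock);
		rq->clock++;			/* update_rq_clock() once */
		for (struct cfs_rq *c = leaf; c; c = c->next)
			decay_one(c);
		pthread_mutex_unlock(&rq->lock);
	}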