sched/pelt: Remove unused runnable load average
Now that runnable_load_avg is no longer used, we can remove it to make space for a new signal.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Link: https://lore.kernel.org/r/20200224095223.13361-8-mgorman@techsingularity.net
commit 0dacee1bfa (parent fb86f5b211), committed by Ingo Molnar
@@ -741,9 +741,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 	 * nothing has been attached to the task group yet.
 	 */
 	if (entity_is_task(se))
-		sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);
-
-	se->runnable_weight = se->load.weight;
+		sa->load_avg = scale_load_down(se->load.weight);
 
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
@@ -2898,25 +2896,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 } while (0)
 
 #ifdef CONFIG_SMP
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->runnable_weight += se->runnable_weight;
-
-	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
-	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
-}
-
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	cfs_rq->runnable_weight -= se->runnable_weight;
-
-	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
-	sub_positive(&cfs_rq->avg.runnable_load_sum,
-		     se_runnable(se) * se->avg.runnable_load_sum);
-}
-
 static inline void
 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -2932,28 +2911,22 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 #else
 static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-static inline void
 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 #endif
 
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
-			    unsigned long weight, unsigned long runnable)
+			    unsigned long weight)
 {
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		if (cfs_rq->curr == se)
 			update_curr(cfs_rq);
 		account_entity_dequeue(cfs_rq, se);
-		dequeue_runnable_load_avg(cfs_rq, se);
 	}
 	dequeue_load_avg(cfs_rq, se);
 
-	se->runnable_weight = runnable;
 	update_load_set(&se->load, weight);
 
 #ifdef CONFIG_SMP
@@ -2961,16 +2934,13 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
 
 		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
-		se->avg.runnable_load_avg =
-			div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
 	} while (0);
 #endif
 
 	enqueue_load_avg(cfs_rq, se);
-	if (se->on_rq) {
+	if (se->on_rq)
 		account_entity_enqueue(cfs_rq, se);
-		enqueue_runnable_load_avg(cfs_rq, se);
-	}
+
 }
 
 void reweight_task(struct task_struct *p, int prio)
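For context on the arithmetic kept above: the do/while block rescales a PELT sum into an average as weight * load_sum / divider, where the divider is the largest sum the entity could have accumulated so far (LOAD_AVG_MAX, 47742 in the kernel's PELT tables, minus the part of the current 1024us accumulation window that has not elapsed yet). A minimal standalone sketch of that conversion, with made-up numbers and plain integers in place of scale_load()/div_u64() (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX	47742	/* maximum value the geometric PELT sum can converge to */

/*
 * Illustrative counterpart of
 *   se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
 * from reweight_entity() above. Plain integers, no scale_load().
 */
static uint64_t pelt_sum_to_avg(uint64_t weight, uint64_t load_sum,
				uint32_t period_contrib)
{
	/* Largest sum attainable so far: full history minus the unfinished
	 * part of the current 1024us window. */
	uint64_t divider = LOAD_AVG_MAX - 1024 + period_contrib;

	return weight * load_sum / divider;
}

int main(void)
{
	/* A weight-1024 entity that was runnable for about half of its
	 * history: the rescaled average comes out close to half the weight. */
	printf("load_avg = %llu\n",
	       (unsigned long long)pelt_sum_to_avg(1024, LOAD_AVG_MAX / 2, 512));
	return 0;
}

With runnable_load_sum gone, this load rescaling is the only one left in the reweight path.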
@@ -2980,7 +2950,7 @@ void reweight_task(struct task_struct *p, int prio)
 	struct load_weight *load = &se->load;
 	unsigned long weight = scale_load(sched_prio_to_weight[prio]);
 
-	reweight_entity(cfs_rq, se, weight, weight);
+	reweight_entity(cfs_rq, se, weight);
 	load->inv_weight = sched_prio_to_wmult[prio];
 }
 
@@ -3092,50 +3062,6 @@ static long calc_group_shares(struct cfs_rq *cfs_rq)
 	 */
 	return clamp_t(long, shares, MIN_SHARES, tg_shares);
 }
-
-/*
- * This calculates the effective runnable weight for a group entity based on
- * the group entity weight calculated above.
- *
- * Because of the above approximation (2), our group entity weight is
- * an load_avg based ratio (3). This means that it includes blocked load and
- * does not represent the runnable weight.
- *
- * Approximate the group entity's runnable weight per ratio from the group
- * runqueue:
- *
- *                                           grq->avg.runnable_load_avg
- *   ge->runnable_weight = ge->load.weight * -------------------------- (7)
- *                                               grq->avg.load_avg
- *
- * However, analogous to above, since the avg numbers are slow, this leads to
- * transients in the from-idle case. Instead we use:
- *
- *   ge->runnable_weight = ge->load.weight *
- *
- *		max(grq->avg.runnable_load_avg, grq->runnable_weight)
- *		----------------------------------------------------- (8)
- *		      max(grq->avg.load_avg, grq->load.weight)
- *
- * Where these max() serve both to use the 'instant' values to fix the slow
- * from-idle and avoid the /0 on to-idle, similar to (6).
- */
-static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
-{
-	long runnable, load_avg;
-
-	load_avg = max(cfs_rq->avg.load_avg,
-		       scale_load_down(cfs_rq->load.weight));
-
-	runnable = max(cfs_rq->avg.runnable_load_avg,
-		       scale_load_down(cfs_rq->runnable_weight));
-
-	runnable *= shares;
-	if (load_avg)
-		runnable /= load_avg;
-
-	return clamp_t(long, runnable, MIN_SHARES, shares);
-}
 #endif /* CONFIG_SMP */
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
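The deleted calc_group_runnable() is a direct implementation of formula (8) from the comment above, with the just-computed shares standing in for ge->load.weight. A toy standalone version (illustrative only: plain longs in place of scale_load_down()/clamp_t()/max(), made-up runqueue values) shows why the max() with the instantaneous weights matters in the from-idle case:

#include <stdio.h>

#define MIN_SHARES	2

static long max_long(long a, long b) { return a > b ? a : b; }
static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Toy version of the removed calc_group_runnable(): formula (8). */
static long group_runnable(long shares,
			   long grq_runnable_load_avg, long grq_runnable_weight,
			   long grq_load_avg, long grq_load_weight)
{
	long runnable = max_long(grq_runnable_load_avg, grq_runnable_weight);
	long load_avg = max_long(grq_load_avg, grq_load_weight);

	runnable *= shares;
	if (load_avg)
		runnable /= load_avg;

	return clamp_long(runnable, MIN_SHARES, shares);
}

int main(void)
{
	/*
	 * Group rq was idle (averages still ~0) and a weight-1024 task was
	 * just enqueued: the instantaneous weights dominate the max(), so
	 * the group entity gets its full runnable weight immediately instead
	 * of ramping up with the slow averages.
	 */
	printf("from idle:    %ld\n", group_runnable(1024, 0, 1024, 0, 1024));
	/* Half of the group rq's load is blocked: runnable weight is halved. */
	printf("half blocked: %ld\n", group_runnable(1024, 512, 512, 1024, 1024));
	return 0;
}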
@@ -3147,7 +3073,7 @@ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
 static void update_cfs_group(struct sched_entity *se)
 {
 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
-	long shares, runnable;
+	long shares;
 
 	if (!gcfs_rq)
 		return;
@@ -3156,16 +3082,15 @@ static void update_cfs_group(struct sched_entity *se)
 		return;
 
 #ifndef CONFIG_SMP
-	runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
+	shares = READ_ONCE(gcfs_rq->tg->shares);
 
 	if (likely(se->load.weight == shares))
 		return;
 #else
 	shares = calc_group_shares(gcfs_rq);
-	runnable = calc_group_runnable(gcfs_rq, shares);
 #endif
 
-	reweight_entity(cfs_rq_of(se), se, shares, runnable);
+	reweight_entity(cfs_rq_of(se), se, shares);
 }
 
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -3290,11 +3215,11 @@ void set_task_rq_fair(struct sched_entity *se,
  * _IFF_ we look at the pure running and runnable sums. Because they
  * represent the very same entity, just at different points in the hierarchy.
  *
- * Per the above update_tg_cfs_util() is trivial and simply copies the running
- * sum over (but still wrong, because the group entity and group rq do not have
- * their PELT windows aligned).
+ * Per the above update_tg_cfs_util() is trivial * and simply copies the
+ * running sum over (but still wrong, because the group entity and group rq do
+ * not have their PELT windows aligned).
  *
- * However, update_tg_cfs_runnable() is more complex. So we have:
+ * However, update_tg_cfs_load() is more complex. So we have:
  *
  *   ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg		(2)
 *
@@ -3375,11 +3300,11 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 }
 
 static inline void
-update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
+update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
 	long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
-	unsigned long runnable_load_avg, load_avg;
-	u64 runnable_load_sum, load_sum = 0;
+	unsigned long load_avg;
+	u64 load_sum = 0;
 	s64 delta_sum;
 
 	if (!runnable_sum)
@@ -3427,20 +3352,6 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 	se->avg.load_avg = load_avg;
 	add_positive(&cfs_rq->avg.load_avg, delta_avg);
 	add_positive(&cfs_rq->avg.load_sum, delta_sum);
-
-	runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
-	runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
-
-	if (se->on_rq) {
-		delta_sum = runnable_load_sum -
-				se_weight(se) * se->avg.runnable_load_sum;
-		delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
-		add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
-		add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
-	}
-
-	se->avg.runnable_load_sum = runnable_sum;
-	se->avg.runnable_load_avg = runnable_load_avg;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
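The propagation code above uses add_positive()/sub_positive() rather than plain +=/-= because the deltas are derived from independently rounded sums and averages, so a slightly oversized negative delta must saturate at zero instead of wrapping the unsigned accumulator. A toy model of that behaviour (illustrative only; these are not the kernel macros, just the clamp-at-zero idea in plain C):

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the add_positive()/sub_positive() pattern used in the hunks
 * above: PELT accumulators are unsigned, and a computed delta can be a
 * little too large because of rounding, so updates clamp at zero instead
 * of wrapping around.
 */
static void add_positive(uint64_t *acc, int64_t delta)
{
	if (delta < 0 && (uint64_t)(-delta) > *acc)
		*acc = 0;
	else
		*acc += delta;
}

static void sub_positive(uint64_t *acc, uint64_t val)
{
	*acc = (val > *acc) ? 0 : *acc - val;
}

int main(void)
{
	uint64_t load_sum = 100;

	add_positive(&load_sum, -120);	/* would underflow: clamps to 0 */
	printf("after add_positive(-120): %llu\n", (unsigned long long)load_sum);

	load_sum = 100;
	sub_positive(&load_sum, 30);	/* normal case: 100 - 30 = 70 */
	printf("after sub_positive(30):   %llu\n", (unsigned long long)load_sum);
	return 0;
}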
@@ -3468,7 +3379,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
 
 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
-	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
+	update_tg_cfs_load(cfs_rq, se, gcfs_rq);
 
 	trace_pelt_cfs_tp(cfs_rq);
 	trace_pelt_se_tp(se);
@@ -3612,8 +3523,6 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 			div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
 	}
 
-	se->avg.runnable_load_sum = se->avg.load_sum;
-
 	enqueue_load_avg(cfs_rq, se);
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
@@ -4074,14 +3983,12 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	/*
 	 * When enqueuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
-	 *   - Add its load to cfs_rq->runnable_avg
 	 *   - For group_entity, update its weight to reflect the new share of
 	 *     its group cfs_rq
 	 *   - Add its new weight to cfs_rq->load.weight
 	 */
 	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
 	update_cfs_group(se);
-	enqueue_runnable_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 
 	if (flags & ENQUEUE_WAKEUP)
@@ -4158,13 +4065,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	/*
 	 * When dequeuing a sched_entity, we must:
 	 *   - Update loads to have both entity and cfs_rq synced with now.
-	 *   - Subtract its load from the cfs_rq->runnable_avg.
 	 *   - Subtract its previous weight from cfs_rq->load.weight.
 	 *   - For group entity, update its weight to reflect the new share
 	 *     of its group cfs_rq.
 	 */
 	update_load_avg(cfs_rq, se, UPDATE_TG);
-	dequeue_runnable_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se, flags);
 
@@ -7649,9 +7554,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 	if (cfs_rq->avg.util_sum)
 		return false;
 
-	if (cfs_rq->avg.runnable_load_sum)
-		return false;
-
 	return true;
 }
 
|