Merge branch 'sched/urgent' into sched/core, to pick up fixes and resolve conflicts
Conflicts:
	kernel/sched/fair.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2370,7 +2370,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
 	 */
 	tg_weight = atomic_long_read(&tg->load_avg);
 	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq_load_avg(cfs_rq);
+	tg_weight += cfs_rq->load.weight;
 
 	return tg_weight;
 }
@@ -2380,7 +2380,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	long tg_weight, load, shares;
 
 	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq_load_avg(cfs_rq);
+	load = cfs_rq->load.weight;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
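The two hunks above are the fix this merge picks up: calc_tg_weight() and calc_cfs_shares() switch from the slow-ramping cfs_rq_load_avg() to the instantaneous cfs_rq->load.weight, which had been leaving freshly woken interactive group entities with overly small shares. Below is a minimal userspace sketch of the shares arithmetic these hunks feed into; the function name calc_shares and the sample numbers are illustrative stand-ins, not kernel code.

/*
 * Standalone sketch (not kernel code) of the arithmetic in
 * calc_cfs_shares(): a group's share on one CPU is tg->shares
 * scaled by this cfs_rq's load relative to the whole group's
 * weight, clamped to [MIN_SHARES, tg->shares].
 */
#include <stdio.h>

#define MIN_SHARES	2L	/* kernel's lower clamp */

static long calc_shares(long tg_shares, long tg_weight, long cfs_rq_load)
{
	long shares = tg_shares * cfs_rq_load;

	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;

	return shares;
}

int main(void)
{
	/*
	 * A freshly woken interactive task: its instantaneous
	 * load.weight (1024) is already full, while its decaying
	 * load average may still be tiny (say 50). Feeding the
	 * average into the formula yields a far smaller share.
	 */
	printf("shares from load.weight:  %ld\n",
	       calc_shares(1024, 4096, 1024));	/* -> 256 */
	printf("shares from load average: %ld\n",
	       calc_shares(1024, 4096, 50));	/* -> 12 */
	return 0;
}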
@@ -2686,12 +2686,13 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
-	int decayed;
+	int decayed, removed = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+		removed = 1;
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2708,7 +2709,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	return decayed;
+	return decayed || removed;
 }
 
 /* Update task and its cfs_rq load average */
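The last two hunks make update_cfs_rq_load_avg() report not only PELT decay but also load that migrating tasks removed, so callers refresh the task group's load_avg even when no decay period has elapsed. A minimal userspace sketch of that claim-once xchg pattern follows; struct cfs_rq_sketch and update_load_avg are hypothetical stand-ins and the numbers are made up.

/*
 * Sketch (not kernel code) of the pattern above: load removed by
 * migrating tasks accumulates atomically and is folded in on the
 * next update; returning "decayed || removed" tells the caller
 * the average changed either way.
 */
#include <stdatomic.h>
#include <stdio.h>

struct cfs_rq_sketch {
	long load_avg;
	atomic_long removed_load_avg;	/* filled when tasks migrate away */
};

static int update_load_avg(struct cfs_rq_sketch *cfs_rq, int decayed)
{
	int removed = 0;

	if (atomic_load(&cfs_rq->removed_load_avg)) {
		/* Claim the pending removals exactly once. */
		long r = atomic_exchange(&cfs_rq->removed_load_avg, 0);

		/* Subtract, clamped at zero, like the kernel's max_t(). */
		cfs_rq->load_avg = cfs_rq->load_avg > r ?
				   cfs_rq->load_avg - r : 0;
		removed = 1;
	}

	return decayed || removed;
}

int main(void)
{
	struct cfs_rq_sketch rq = { .load_avg = 300 };

	atomic_init(&rq.removed_load_avg, 100);	/* a task migrated away */
	printf("changed=%d load_avg=%ld\n",
	       update_load_avg(&rq, /*decayed=*/0), rq.load_avg);
	return 0;
}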