From de0ba4ea3c417b3cac2f6cef6fcbae6bde36b88d Mon Sep 17 00:00:00 2001
From: Rick Yiu
Date: Tue, 16 Nov 2021 12:33:07 +0800
Subject: [PATCH] Revert "sched/pelt: Ensure that *_sum is always synced with
 *_avg"

This reverts commit 813ff24f1d08cb4b4605fe222bb104a8dbdfd7f5.

Bug: 205915994
Test: build pass
Signed-off-by: Rick Yiu
Change-Id: I77d9a103d71da43ce2fe9b630c7a5e12b62e8e42
---
 kernel/sched/fair.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f9c570d86282..aa3d2291874d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3777,17 +3777,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	/*
-	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-	 * See ___update_load_avg() for details.
-	 */
-	u32 divider = get_pelt_divider(&cfs_rq->avg);
-
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);