sched/uclamp: Fix fits_capacity() check in feec()
commit 244226035a1f9b2b6c326e55ae5188fab4f428cb upstream.
As reported by Yun Hsiang [1], if a task has its uclamp_min >= 0.8 * 1024,
it'll always pick the previous CPU because fits_capacity() will always
return false in this case.
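As an illustration only (not part of this patch), fits_capacity() keeps
~20% headroom, so any clamp value at or above 80% of the largest CPU
capacity can never fit; a minimal userspace sketch of the arithmetic:

	#include <stdio.h>

	/* Same 80%-headroom check as fits_capacity() in kernel/sched/fair.c. */
	#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

	int main(void)
	{
		unsigned long uclamp_min = 820;		/* >= 0.8 * 1024 */
		unsigned long biggest_cpu = 1024;	/* largest capacity in the system */

		/* 820 * 1280 = 1049600 is not < 1024 * 1024 = 1048576, so this prints 0 */
		printf("fits: %d\n", fits_capacity(uclamp_min, biggest_cpu));
		return 0;
	}
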
The new util_fits_cpu() logic should handle this correctly for us, besides
covering more corner cases where similar failures could occur, like when
using UCLAMP_MAX.
We open code uclamp_rq_util_with() except for the clamp() part, as
util_fits_cpu() needs the 'raw' values to be passed to it.
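For context, a simplified sketch (not the patched code; details such as the
min/max inversion handling are omitted) of what uclamp_rq_util_with() does.
The final clamp() is the part feec() no longer wants, since util_fits_cpu()
takes util, uclamp_min and uclamp_max separately:

	static __always_inline
	unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
					  struct task_struct *p)
	{
		unsigned long min_util = uclamp_rq_get(rq, UCLAMP_MIN);
		unsigned long max_util = uclamp_rq_get(rq, UCLAMP_MAX);

		if (p) {
			min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
			max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
		}

		return clamp(util, min_util, max_util);	/* <- the part left out */
	}
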
Also introduce uclamp_rq_{set, get}() shorthand accessors to get the uclamp
value for the rq. This makes the code more readable and ensures the right
rules (use of READ_ONCE()/WRITE_ONCE()) are respected transparently.
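The accessors themselves live in kernel/sched/sched.h in the upstream
commit (the excerpt below only shows the kernel/sched/core.c call sites);
they look roughly like:

	static inline unsigned long uclamp_rq_get(struct rq *rq,
						  enum uclamp_id clamp_id)
	{
		return READ_ONCE(rq->uclamp[clamp_id].value);
	}

	static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
					 unsigned int value)
	{
		WRITE_ONCE(rq->uclamp[clamp_id].value, value);
	}
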
[1] https://lists.linaro.org/pipermail/eas-dev/2020-July/001488.html
Fixes: 1d42509e47 ("sched/fair: Make EAS wakeup placement consider uclamp restrictions")
Reported-by: Yun Hsiang <hsiang023167@gmail.com>
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220804143609.515789-4-qais.yousef@arm.com
(cherry picked from commit 244226035a1f9b2b6c326e55ae5188fab4f428cb)
[Fix trivial conflict in kernel/sched/fair.c due to new automatic
variables in master vs 5.10]
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -980,7 +980,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
 		return;
 
-	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
+	uclamp_rq_set(rq, clamp_id, clamp_value);
 }
 
 static inline
@@ -1158,8 +1158,8 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
 		bucket->value = uc_se->value;
 
-	if (uc_se->value > READ_ONCE(uc_rq->value))
-		WRITE_ONCE(uc_rq->value, uc_se->value);
+	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
+		uclamp_rq_set(rq, clamp_id, uc_se->value);
 }
 
 /*
@@ -1225,7 +1225,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 	if (likely(bucket->tasks))
 		return;
 
-	rq_clamp = READ_ONCE(uc_rq->value);
+	rq_clamp = uclamp_rq_get(rq, clamp_id);
 	/*
 	 * Defensive programming: this should never happen. If it happens,
 	 * e.g. due to future modification, warn and fixup the expected value.
@@ -1233,7 +1233,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 	SCHED_WARN_ON(bucket->value > rq_clamp);
 	if (bucket->value >= rq_clamp) {
 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
-		WRITE_ONCE(uc_rq->value, bkt_clamp);
+		uclamp_rq_set(rq, clamp_id, bkt_clamp);
 	}
 }