sched/fair: Convert arch_scale_cpu_capacity() from weak function to #define
Bring arch_scale_cpu_capacity() in line with the recent change of its
arch_scale_freq_capacity() sibling in commit dfbca41f34 ("sched:
Optimize freq invariant accounting") from weak function to #define to
allow inlining of the function.
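
For illustration only (not part of this patch), the #define-based pattern
works roughly as sketched below: the generic scheduler header supplies an
inline default that is compiled out whenever an architecture has already
#defined arch_scale_cpu_capacity to its own implementation. The body mirrors
the default_scale_cpu_capacity() logic removed from fair.c further down;
treat it as a sketch, not the exact hunk applied elsewhere in the series.

/* Sketch: generic fallback, overridable at build time by an arch #define. */
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	/* SMT siblings share capacity; split smt_gain between them. */
	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif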
While at it, remove the ARCH_CAPACITY sched_feature as well. With the
change to #define there isn't a straightforward way to allow a runtime
switch between an arch implementation and the default implementation of
arch_scale_cpu_capacity() using sched_feature. The default was to use
the arch-specific implementation, but only the arm architecture provides
one and that is essentially equivalent to the default implementation.
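
With the sched_feature gone, an architecture selects its own scaling at
build time instead of at runtime. A hedged sketch of how that hook-up would
look (the function name and header placement are illustrative, not taken
from this patch):

/* In an arch topology header (illustrative): */
unsigned long arm_scale_cpu_capacity(struct sched_domain *sd, int cpu);
#define arch_scale_cpu_capacity arm_scale_cpu_capacity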
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <Dietmar.Eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: daniel.lezcano@linaro.org
Cc: mturquette@baylibre.com
Cc: pang.xunlei@zte.com.cn
Cc: rjw@rjwysocki.net
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1439569394-11974-3-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
committed by: Ingo Molnar
parent: e0f5f3afd2
commit: 8cd5601c50
@@ -6054,19 +6054,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
-		return sd->smt_gain / sd->span_weight;
-
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_cpu_capacity(sd, cpu);
-}
-
 static unsigned long scale_rt_capacity(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -6096,16 +6083,9 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	unsigned long capacity = SCHED_CAPACITY_SCALE;
+	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
 
-	if (sched_feat(ARCH_CAPACITY))
-		capacity *= arch_scale_cpu_capacity(sd, cpu);
-	else
-		capacity *= default_scale_cpu_capacity(sd, cpu);
-
-	capacity >>= SCHED_CAPACITY_SHIFT;
-
 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
 	capacity *= scale_rt_capacity(cpu);