Merge branch 'linus' into sched/urgent, to pick up fixes and updates
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -11,61 +11,56 @@
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cpufreq.h>
-#include <linux/kthread.h>
-#include <uapi/linux/sched/types.h>
-#include <linux/slab.h>
-#include <trace/events/power.h>
-
-#include "sched.h"
+#include "sched.h"
+
+#include <trace/events/power.h>
 
 struct sugov_tunables {
-        struct gov_attr_set attr_set;
-        unsigned int rate_limit_us;
+        struct gov_attr_set     attr_set;
+        unsigned int            rate_limit_us;
 };
 
 struct sugov_policy {
-        struct cpufreq_policy *policy;
+        struct cpufreq_policy   *policy;
 
-        struct sugov_tunables *tunables;
-        struct list_head tunables_hook;
+        struct sugov_tunables   *tunables;
+        struct list_head        tunables_hook;
 
-        raw_spinlock_t update_lock; /* For shared policies */
-        u64 last_freq_update_time;
-        s64 freq_update_delay_ns;
-        unsigned int next_freq;
-        unsigned int cached_raw_freq;
+        raw_spinlock_t          update_lock;    /* For shared policies */
+        u64                     last_freq_update_time;
+        s64                     freq_update_delay_ns;
+        unsigned int            next_freq;
+        unsigned int            cached_raw_freq;
 
-        /* The next fields are only needed if fast switch cannot be used. */
-        struct irq_work irq_work;
-        struct kthread_work work;
-        struct mutex work_lock;
-        struct kthread_worker worker;
-        struct task_struct *thread;
-        bool work_in_progress;
+        /* The next fields are only needed if fast switch cannot be used: */
+        struct irq_work         irq_work;
+        struct kthread_work     work;
+        struct mutex            work_lock;
+        struct kthread_worker   worker;
+        struct task_struct      *thread;
+        bool                    work_in_progress;
 
-        bool need_freq_update;
+        bool                    need_freq_update;
 };
 
 struct sugov_cpu {
-        struct update_util_data update_util;
-        struct sugov_policy *sg_policy;
-        unsigned int cpu;
+        struct update_util_data update_util;
+        struct sugov_policy     *sg_policy;
+        unsigned int            cpu;
 
-        bool iowait_boost_pending;
-        unsigned int iowait_boost;
-        unsigned int iowait_boost_max;
+        bool                    iowait_boost_pending;
+        unsigned int            iowait_boost;
+        unsigned int            iowait_boost_max;
         u64 last_update;
 
-        /* The fields below are only needed when sharing a policy. */
-        unsigned long util_cfs;
-        unsigned long util_dl;
-        unsigned long max;
-        unsigned int flags;
+        /* The fields below are only needed when sharing a policy: */
+        unsigned long           util_cfs;
+        unsigned long           util_dl;
+        unsigned long           max;
 
-        /* The field below is for single-CPU policies only. */
+        /* The field below is for single-CPU policies only: */
 #ifdef CONFIG_NO_HZ_COMMON
-        unsigned long saved_idle_calls;
+        unsigned long           saved_idle_calls;
 #endif
 };
 
@@ -79,9 +74,9 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 
         /*
          * Since cpufreq_update_util() is called with rq->lock held for
-         * the @target_cpu, our per-cpu data is fully serialized.
+         * the @target_cpu, our per-CPU data is fully serialized.
          *
-         * However, drivers cannot in general deal with cross-cpu
+         * However, drivers cannot in general deal with cross-CPU
          * requests, so while get_next_freq() will work, our
          * sugov_update_commit() call may not for the fast switching platforms.
          *
@@ -111,6 +106,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
         }
 
         delta_ns = time - sg_policy->last_freq_update_time;
+
         return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
@@ -186,17 +182,28 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 
 static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
+        struct rq *rq = cpu_rq(sg_cpu->cpu);
+        unsigned long util;
+
+        if (rq->rt.rt_nr_running) {
+                util = sg_cpu->max;
+        } else {
+                util = sg_cpu->util_dl;
+                if (rq->cfs.h_nr_running)
+                        util += sg_cpu->util_cfs;
+        }
+
         /*
          * Ideally we would like to set util_dl as min/guaranteed freq and
          * util_cfs + util_dl as requested freq. However, cpufreq is not yet
          * ready for such an interface. So, we only do the latter for now.
          */
-        return min(sg_cpu->util_cfs + sg_cpu->util_dl, sg_cpu->max);
+        return min(util, sg_cpu->max);
 }
 
-static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
+static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags)
 {
-        if (sg_cpu->flags & SCHED_CPUFREQ_IOWAIT) {
+        if (flags & SCHED_CPUFREQ_IOWAIT) {
                 if (sg_cpu->iowait_boost_pending)
                         return;
 
@@ -260,43 +267,51 @@ static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+/*
+ * Make sugov_should_update_freq() ignore the rate limit when DL
+ * has increased the utilization.
+ */
+static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
+{
+        if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+                sg_policy->need_freq_update = true;
+}
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
                                 unsigned int flags)
 {
         struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
         struct sugov_policy *sg_policy = sg_cpu->sg_policy;
         struct cpufreq_policy *policy = sg_policy->policy;
         unsigned long util, max;
         unsigned int next_f;
         bool busy;
 
-        sugov_set_iowait_boost(sg_cpu, time);
+        sugov_set_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
 
+        ignore_dl_rate_limit(sg_cpu, sg_policy);
+
         if (!sugov_should_update_freq(sg_policy, time))
                 return;
 
         busy = sugov_cpu_is_busy(sg_cpu);
 
-        if (flags & SCHED_CPUFREQ_RT) {
-                next_f = policy->cpuinfo.max_freq;
-        } else {
-                sugov_get_util(sg_cpu);
-                max = sg_cpu->max;
-                util = sugov_aggregate_util(sg_cpu);
-                sugov_iowait_boost(sg_cpu, &util, &max);
-                next_f = get_next_freq(sg_policy, util, max);
-                /*
-                 * Do not reduce the frequency if the CPU has not been idle
-                 * recently, as the reduction is likely to be premature then.
-                 */
-                if (busy && next_f < sg_policy->next_freq) {
-                        next_f = sg_policy->next_freq;
+        sugov_get_util(sg_cpu);
+        max = sg_cpu->max;
+        util = sugov_aggregate_util(sg_cpu);
+        sugov_iowait_boost(sg_cpu, &util, &max);
+        next_f = get_next_freq(sg_policy, util, max);
+        /*
+         * Do not reduce the frequency if the CPU has not been idle
+         * recently, as the reduction is likely to be premature then.
+         */
+        if (busy && next_f < sg_policy->next_freq) {
+                next_f = sg_policy->next_freq;
 
-                        /* Reset cached freq as next_freq has changed */
-                        sg_policy->cached_raw_freq = 0;
-                }
+                /* Reset cached freq as next_freq has changed */
+                sg_policy->cached_raw_freq = 0;
         }
 
         sugov_update_commit(sg_policy, time, next_f);
 }
 
@@ -312,6 +327,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
                 unsigned long j_util, j_max;
                 s64 delta_ns;
 
+                sugov_get_util(j_sg_cpu);
+
                 /*
                  * If the CFS CPU utilization was last updated before the
                  * previous frequency update and the time elapsed between the
@@ -325,28 +342,22 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
                 if (delta_ns > TICK_NSEC) {
                         j_sg_cpu->iowait_boost = 0;
                         j_sg_cpu->iowait_boost_pending = false;
+                        j_sg_cpu->util_cfs = 0;
+                        if (j_sg_cpu->util_dl == 0)
+                                continue;
                 }
-                if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
-                        return policy->cpuinfo.max_freq;
-
                 j_max = j_sg_cpu->max;
                 j_util = sugov_aggregate_util(j_sg_cpu);
+                sugov_iowait_boost(j_sg_cpu, &j_util, &j_max);
                 if (j_util * max > j_max * util) {
                         util = j_util;
                         max = j_max;
                 }
-
-                sugov_iowait_boost(j_sg_cpu, &util, &max);
         }
 
         return get_next_freq(sg_policy, util, max);
 }
 
-static void sugov_update_shared(struct update_util_data *hook, u64 time,
-                                unsigned int flags)
+static void
+sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 {
         struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
         struct sugov_policy *sg_policy = sg_cpu->sg_policy;
@@ -354,18 +365,13 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 
         raw_spin_lock(&sg_policy->update_lock);
 
-        sugov_get_util(sg_cpu);
-        sg_cpu->flags = flags;
-
-        sugov_set_iowait_boost(sg_cpu, time);
+        sugov_set_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
 
-        if (sugov_should_update_freq(sg_policy, time)) {
-                if (flags & SCHED_CPUFREQ_RT)
-                        next_f = sg_policy->policy->cpuinfo.max_freq;
-                else
-                        next_f = sugov_next_freq_shared(sg_cpu, time);
-
+        ignore_dl_rate_limit(sg_cpu, sg_policy);
+
+        if (sugov_should_update_freq(sg_policy, time)) {
+                next_f = sugov_next_freq_shared(sg_cpu, time);
                 sugov_update_commit(sg_policy, time, next_f);
         }
 
@@ -423,8 +429,8 @@ static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
         return sprintf(buf, "%u\n", tunables->rate_limit_us);
 }
 
-static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
-                                   size_t count)
+static ssize_t
+rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
 {
         struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
         struct sugov_policy *sg_policy;
@@ -479,11 +485,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 {
         struct task_struct *thread;
         struct sched_attr attr = {
-                .size = sizeof(struct sched_attr),
-                .sched_policy = SCHED_DEADLINE,
-                .sched_flags = SCHED_FLAG_SUGOV,
-                .sched_nice = 0,
-                .sched_priority = 0,
+                .size           = sizeof(struct sched_attr),
+                .sched_policy   = SCHED_DEADLINE,
+                .sched_flags    = SCHED_FLAG_SUGOV,
+                .sched_nice     = 0,
+                .sched_priority = 0,
                 /*
                  * Fake (unused) bandwidth; workaround to "fix"
                  * priority inheritance.
@@ -662,21 +668,20 @@ static int sugov_start(struct cpufreq_policy *policy)
         struct sugov_policy *sg_policy = policy->governor_data;
         unsigned int cpu;
 
-        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
-        sg_policy->last_freq_update_time = 0;
-        sg_policy->next_freq = UINT_MAX;
-        sg_policy->work_in_progress = false;
-        sg_policy->need_freq_update = false;
-        sg_policy->cached_raw_freq = 0;
+        sg_policy->freq_update_delay_ns         = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
+        sg_policy->last_freq_update_time        = 0;
+        sg_policy->next_freq                    = UINT_MAX;
+        sg_policy->work_in_progress             = false;
+        sg_policy->need_freq_update             = false;
+        sg_policy->cached_raw_freq              = 0;
 
         for_each_cpu(cpu, policy->cpus) {
                 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
                 memset(sg_cpu, 0, sizeof(*sg_cpu));
-                sg_cpu->cpu = cpu;
-                sg_cpu->sg_policy = sg_policy;
-                sg_cpu->flags = 0;
-                sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+                sg_cpu->cpu                     = cpu;
+                sg_cpu->sg_policy               = sg_policy;
+                sg_cpu->iowait_boost_max        = policy->cpuinfo.max_freq;
         }
 
         for_each_cpu(cpu, policy->cpus) {
@@ -720,14 +725,14 @@ static void sugov_limits(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_governor schedutil_gov = {
-        .name = "schedutil",
-        .owner = THIS_MODULE,
-        .dynamic_switching = true,
-        .init = sugov_init,
-        .exit = sugov_exit,
-        .start = sugov_start,
-        .stop = sugov_stop,
-        .limits = sugov_limits,
+        .name                   = "schedutil",
+        .owner                  = THIS_MODULE,
+        .dynamic_switching      = true,
+        .init                   = sugov_init,
+        .exit                   = sugov_exit,
+        .start                  = sugov_start,
+        .stop                   = sugov_stop,
+        .limits                 = sugov_limits,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL