Merge tag 'v4.20-rc5' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -2880,6 +2880,18 @@ unsigned long long nr_context_switches(void)
 	return sum;
 }
 
+/*
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data. Preferring shallow idle state selection
+ * for a CPU that has IO-wait which might not even end up running the task when
+ * it does become runnable.
+ */
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+	return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
 /*
  * IO-wait accounting, and how its mostly bollocks (on SMP).
  *
@@ -2915,31 +2927,11 @@ unsigned long nr_iowait(void)
 	unsigned long i, sum = 0;
 
 	for_each_possible_cpu(i)
-		sum += atomic_read(&cpu_rq(i)->nr_iowait);
+		sum += nr_iowait_cpu(i);
 
 	return sum;
 }
 
-/*
- * Consumers of these two interfaces, like for example the cpuidle menu
- * governor, are using nonsensical data. Preferring shallow idle state selection
- * for a CPU that has IO-wait which might not even end up running the task when
- * it does become runnable.
- */
-
-unsigned long nr_iowait_cpu(int cpu)
-{
-	struct rq *this = cpu_rq(cpu);
-	return atomic_read(&this->nr_iowait);
-}
-
-void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-{
-	struct rq *rq = this_rq();
-	*nr_waiters = atomic_read(&rq->nr_iowait);
-	*load = rq->load.weight;
-}
-
 #ifdef CONFIG_SMP
 
 /*
@@ -5746,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
 
 #ifdef CONFIG_SCHED_SMT
 	/*
-	 * The sched_smt_present static key needs to be evaluated on every
-	 * hotplug event because at boot time SMT might be disabled when
-	 * the number of booted CPUs is limited.
-	 *
-	 * If then later a sibling gets hotplugged, then the key would stay
-	 * off and SMT scheduling would never be functional.
+	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
-		static_branch_enable_cpuslocked(&sched_smt_present);
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
 	set_cpu_active(cpu, true);
 
@@ -5798,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going down, decrement the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
 	if (!sched_smp_initialized)
 		return 0;
 
@@ -136,8 +136,18 @@
 
 static int psi_bug __read_mostly;
 
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+	return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
 
 /* Running averages - we need to be higher-res than loadavg */
 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
 
 void __init psi_init(void)
 {
-	if (psi_disabled)
+	if (!psi_enable) {
+		static_branch_enable(&psi_disabled);
 		return;
+	}
 
 	psi_period = jiffies_to_nsecs(PSI_FREQ);
 	group_init(&psi_system);
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	*flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
 #ifdef CONFIG_CGROUPS
 int psi_cgroup_alloc(struct cgroup *cgroup)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return 0;
 
 	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 
 void psi_cgroup_free(struct cgroup *cgroup)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -633,38 +645,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
 	 */
 void cgroup_move_task(struct task_struct *task, struct css_set *to)
 {
-	bool move_psi = !psi_disabled;
 	unsigned int task_flags = 0;
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (move_psi) {
-		rq = task_rq_lock(task, &rf);
-
-		if (task_on_rq_queued(task))
-			task_flags = TSK_RUNNING;
-		else if (task->in_iowait)
-			task_flags = TSK_IOWAIT;
-
-		if (task->flags & PF_MEMSTALL)
-			task_flags |= TSK_MEMSTALL;
-
-		if (task_flags)
-			psi_task_change(task, task_flags, 0);
+	if (static_branch_likely(&psi_disabled)) {
+		/*
+		 * Lame to do this here, but the scheduler cannot be locked
+		 * from the outside, so we move cgroups from inside sched/.
+		 */
+		rcu_assign_pointer(task->cgroups, to);
+		return;
 	}
 
-	/*
-	 * Lame to do this here, but the scheduler cannot be locked
-	 * from the outside, so we move cgroups from inside sched/.
-	 */
+	rq = task_rq_lock(task, &rf);
+
+	if (task_on_rq_queued(task))
+		task_flags = TSK_RUNNING;
+	else if (task->in_iowait)
+		task_flags = TSK_IOWAIT;
+
+	if (task->flags & PF_MEMSTALL)
+		task_flags |= TSK_MEMSTALL;
+
+	if (task_flags)
+		psi_task_change(task, task_flags, 0);
+
+	/* See comment above */
 	rcu_assign_pointer(task->cgroups, to);
 
-	if (move_psi) {
-		if (task_flags)
-			psi_task_change(task, 0, task_flags);
+	if (task_flags)
+		psi_task_change(task, 0, task_flags);
 
-		task_rq_unlock(rq, task, &rf);
-	}
+	task_rq_unlock(rq, task, &rf);
 }
 #endif /* CONFIG_CGROUPS */
 
@@ -672,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 {
 	int full;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return -EOPNOTSUPP;
 
 	update_stats(group);
@@ -23,6 +23,7 @@
 #include <linux/sched/prio.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/stat.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/task.h>
@@ -941,9 +942,6 @@ static inline int cpu_of(struct rq *rq)
 
 
 #ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
 extern void __update_idle_core(struct rq *rq);
 
 static inline void update_idle_core(struct rq *rq)
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 {
 	int clear = 0, set = TSK_RUNNING;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
 	int clear = TSK_RUNNING, set = 0;
 
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 	/*
 	 * Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 
 static inline void psi_task_tick(struct rq *rq)
 {
-	if (psi_disabled)
+	if (static_branch_likely(&psi_disabled))
 		return;
 
 	if (unlikely(rq->curr->flags & PF_MEMSTALL))