Merge branch 'linus' into timers/core
Make sure the upstream fixes are applied before adding further modifications.
@@ -995,13 +995,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1032,18 +1025,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
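
Note: the two hunks above remove the task-migration notifier chain together with its only entry point, register_task_migration_notifier(). For reference, a minimal sketch of how a subscriber would have used the removed interface; my_migration_cb and my_migration_nb are illustrative names, not part of this patch:

	#include <linux/notifier.h>

	/* Callback fired by set_task_cpu() on every cross-CPU migration. */
	static int my_migration_cb(struct notifier_block *nb,
				   unsigned long action, void *data)
	{
		struct task_migration_notifier *tmn = data;

		/* tmn->task is moving from tmn->from_cpu to tmn->to_cpu. */
		return NOTIFY_OK;
	}

	static struct notifier_block my_migration_nb = {
		.notifier_call = my_migration_cb,
	};

	/* Registration, e.g. from an __init function: */
	/* register_task_migration_notifier(&my_migration_nb); */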
@@ -3294,15 +3279,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-			   const struct sched_attr *attr)
+			   const struct sched_attr *attr, bool keep_boost)
 {
 	__setscheduler_params(p, attr);
 
 	/*
-	 * If we get here, there was no pi waiters boosting the
-	 * task. It is safe to use the normal prio.
+	 * Keep a potential priority boosting if called from
+	 * sched_setscheduler().
 	 */
-	p->prio = normal_prio(p);
+	if (keep_boost)
+		p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+	else
+		p->prio = normal_prio(p);
 
 	if (dl_prio(p->prio))
 		p->sched_class = &dl_sched_class;
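
Note on the new keep_boost parameter: as I read it, rt_mutex_get_effective_prio(p, prio) returns the stronger of the given normal priority and the priority of p's top rt_mutex waiter, so a PI boost survives a sched_setscheduler() call but is dropped by normalize_task() (last hunk, keep_boost=false). A worked example with illustrative values (lower number = higher priority):

	/*
	 * normal_prio(p) == 120 (plain SCHED_NORMAL task)
	 * p is PI-boosted to 98 (an RT waiter blocks on a mutex p holds)
	 *
	 * keep_boost == true:
	 *	p->prio = rt_mutex_get_effective_prio(p, 120); // -> 98, boost kept
	 * keep_boost == false:
	 *	p->prio = normal_prio(p);                      // -> 120, boost dropped
	 */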
@@ -3402,7 +3390,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
 	int retval, oldprio, oldpolicy = -1, queued, running;
-	int policy = attr->sched_policy;
+	int new_effective_prio, policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
 	struct rq *rq;
@@ -3584,15 +3572,14 @@ change:
 	oldprio = p->prio;
 
 	/*
-	 * Special case for priority boosted tasks.
-	 *
-	 * If the new priority is lower or equal (user space view)
-	 * than the current (boosted) priority, we just store the new
+	 * Take priority boosted tasks into account. If the new
+	 * effective priority is unchanged, we just store the new
 	 * normal parameters and do not touch the scheduler class and
 	 * the runqueue. This will be done when the task deboost
 	 * itself.
 	 */
-	if (rt_mutex_check_prio(p, newprio)) {
+	new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+	if (new_effective_prio == oldprio) {
 		__setscheduler_params(p, attr);
 		task_rq_unlock(rq, p, &flags);
 		return 0;
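
The change above replaces the old boolean shortcut, which skipped the requeue whenever the new priority did not exceed the boosted one, with an exact comparison: the requeue is skipped only when the computed effective priority equals the current prio, so both raising and lowering the priority of a boosted task now take the full path. Condensed control flow (a sketch assembled from the surrounding code, not the complete function):

	oldprio = p->prio;
	new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
	if (new_effective_prio == oldprio) {
		/* Parameters only; the requeue happens when the task deboosts. */
		__setscheduler_params(p, attr);
		task_rq_unlock(rq, p, &flags);
		return 0;
	}
	/* else: dequeue, __setscheduler(rq, p, attr, true), re-enqueue */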
@@ -3606,7 +3593,7 @@ change:
 		put_prev_task(rq, p);
 
 	prev_class = p->sched_class;
-	__setscheduler(rq, p, attr);
+	__setscheduler(rq, p, attr, true);
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
@@ -6991,27 +6978,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 	unsigned long flags;
 	long cpu = (long)hcpu;
 	struct dl_bw *dl_b;
+	bool overflow;
+	int cpus;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
 	case CPU_DOWN_PREPARE:
-		/* explicitly allow suspend */
-		if (!(action & CPU_TASKS_FROZEN)) {
-			bool overflow;
-			int cpus;
-
-			rcu_read_lock_sched();
-			dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
-			raw_spin_lock_irqsave(&dl_b->lock, flags);
-			cpus = dl_bw_cpus(cpu);
-			overflow = __dl_overflow(dl_b, cpus, 0, 0);
-			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
+		cpus = dl_bw_cpus(cpu);
+		overflow = __dl_overflow(dl_b, cpus, 0, 0);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
-			rcu_read_unlock_sched();
+		rcu_read_unlock_sched();
 
-			if (overflow)
-				return notifier_from_errno(-EBUSY);
-		}
+		if (overflow)
+			return notifier_from_errno(-EBUSY);
 		cpuset_update_active_cpus(false);
 		break;
 	case CPU_DOWN_PREPARE_FROZEN:
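
Context for the hunk above: taking a CPU offline must first check that the remaining CPUs still hold enough SCHED_DEADLINE bandwidth, but during suspend (CPU_DOWN_PREPARE_FROZEN) every CPU goes down regardless, so the check must not veto the unplug. Under the old masked switch the FROZEN case was unreachable and suspend took the wrong branch; dispatching on the unmasked action restores it. The admission test, roughly as the helper was defined in kernel/sched/sched.h at the time (quoted from memory):

	static inline
	bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
	{
		return dl_b->bw != -1 &&
		       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
	}

Illustrative numbers: with the default 95% DL bandwidth per CPU and reserved utilization worth ~3.2 CPUs, dropping from 4 to 3 CPUs gives 3 * 0.95 = 2.85 < 3.2, so the notifier returns -EBUSY and the unplug is refused.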
@@ -7340,7 +7323,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	queued = task_on_rq_queued(p);
 	if (queued)
 		dequeue_task(rq, p, 0);
-	__setscheduler(rq, p, &attr);
+	__setscheduler(rq, p, &attr, false);
 	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);