Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/bpf/verifier.c

@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
         }
 
         if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+            BPF_SIZE(insn->code) == BPF_DW ||
             (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
                 verbose("BPF_LD_ABS uses reserved fields\n");
                 return -EINVAL;
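For context on the added check: BPF_LD with BPF_ABS/BPF_IND only supports 1, 2 and 4 byte packet loads, so an instruction that encodes the 8-byte BPF_DW size is now rejected up front instead of reaching the interpreter or JIT. A small user-space sketch of the same size-bit test, using the standard BPF opcode masks; this is an illustration, not the verifier code:

/* Illustration only: classify an opcode the way the new size check does.
 * The masks below follow the standard BPF encoding (uapi/linux/bpf_common.h). */
#include <stdint.h>
#include <stdio.h>

#define BPF_SIZE(code)  ((code) & 0x18)   /* W=0x00, H=0x08, B=0x10, DW=0x18 */
#define BPF_LD          0x00
#define BPF_W           0x00
#define BPF_DW          0x18
#define BPF_ABS         0x20

static int ld_abs_size_ok(uint8_t code)
{
        /* There is no 64-bit packet-load helper, so BPF_DW must be refused. */
        return BPF_SIZE(code) != BPF_DW;
}

int main(void)
{
        printf("LD|ABS|W  allowed: %d\n", ld_abs_size_ok(BPF_LD | BPF_ABS | BPF_W));
        printf("LD|ABS|DW allowed: %d\n", ld_abs_size_ok(BPF_LD | BPF_ABS | BPF_DW));
        return 0;
}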
33 kernel/cpu.c
@@ -36,6 +36,7 @@
  * @target:      The target state
  * @thread:      Pointer to the hotplug thread
  * @should_run:  Thread should execute
+ * @rollback:    Perform a rollback
  * @cb_stat:     The state for a single callback (install/uninstall)
  * @cb:          Single callback function (install/uninstall)
  * @result:      Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
 #ifdef CONFIG_SMP
         struct task_struct      *thread;
         bool                    should_run;
+        bool                    rollback;
         enum cpuhp_state        cb_state;
         int                     (*cb)(unsigned int cpu);
         int                     result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
         return __cpu_notify(val, cpu, -1, NULL);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+        BUG_ON(cpu_notify(val, cpu));
+}
+
 /* Notifier wrappers for transitioning to state machine */
 static int notify_prepare(unsigned int cpu)
 {
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
                 } else {
                         ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                 }
+        } else if (st->rollback) {
+                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+                undo_cpu_down(cpu, st, cpuhp_ap_states);
+                /*
+                 * This is a momentary workaround to keep the notifier users
+                 * happy. Will go away once we got rid of the notifiers.
+                 */
+                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+                st->rollback = false;
         } else {
                 /* Cannot happen .... */
                 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
         read_unlock(&tasklist_lock);
 }
 
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-        BUG_ON(cpu_notify(val, cpu));
-}
-
 static int notify_down_prepare(unsigned int cpu)
 {
         int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
          */
         err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
         if (err) {
-                /* CPU didn't die: tell everyone. Can't complain. */
-                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+                /* CPU refused to die */
                 irq_unlock_sparse();
+                /* Unpark the hotplug thread so we can rollback there */
+                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                 return err;
         }
         BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
          * to do the further cleanups.
          */
         ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+                st->target = prev_state;
+                st->rollback = true;
+                cpuhp_kick_ap_work(cpu);
+        }
 
         hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                 .name           = "notify:online",
                 .startup        = notify_online,
+                .teardown       = notify_down_prepare,
                 .skip_onerr     = true,
         },
 #endif
         /*
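Taken together, the kernel/cpu.c hunks let a failed CPU-down operation roll back through the hotplug state machine (via the unparked per-CPU thread) instead of leaving the CPU half offline. A deliberately simplified, self-contained sketch of that save-target-and-roll-back pattern; the types and step names are hypothetical, not the kernel's cpuhp API:

/* Hypothetical model: remember where we started, run teardown steps
 * downwards, and on failure re-target the old state and walk back up. */
#include <stdbool.h>
#include <stdio.h>

enum state { ST_OFFLINE, ST_PREPARE, ST_TEARDOWN, ST_ONLINE };

struct cpu_state {
        enum state state;    /* current position in the state machine */
        enum state target;   /* state we are trying to reach */
        bool rollback;       /* a failed step asked for an undo */
};

static int teardown_step(enum state s)
{
        return s == ST_PREPARE ? -1 : 0;    /* pretend this step fails */
}

static void cpu_down(struct cpu_state *st)
{
        enum state prev = st->state;

        while (st->state > st->target) {
                if (teardown_step(st->state)) {
                        st->target = prev;  /* aim back at the old state */
                        st->rollback = true;
                        break;
                }
                st->state--;
        }
        if (st->rollback) {
                while (st->state < st->target)
                        st->state++;        /* re-run the startup side */
                st->rollback = false;       /* as cpuhp_thread_fun() clears it */
        }
}

int main(void)
{
        struct cpu_state st = { .state = ST_ONLINE, .target = ST_OFFLINE };

        cpu_down(&st);
        printf("state=%d rollback=%d\n", st.state, st.rollback);
        return 0;
}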
kernel/futex.c

@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
         if (unlikely(should_fail_futex(true)))
                 ret = -EFAULT;
 
-        if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+        if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
                 ret = -EFAULT;
-        else if (curval != uval)
-                ret = -EINVAL;
+        } else if (curval != uval) {
+                /*
+                 * If a unconditional UNLOCK_PI operation (user space did not
+                 * try the TID->0 transition) raced with a waiter setting the
+                 * FUTEX_WAITERS flag between get_user() and locking the hash
+                 * bucket lock, retry the operation.
+                 */
+                if ((FUTEX_TID_MASK & curval) == uval)
+                        ret = -EAGAIN;
+                else
+                        ret = -EINVAL;
+        }
         if (ret) {
                 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                 return ret;
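The new -EAGAIN path relies on the futex word layout: the low 30 bits hold the owner's TID while the top bits are flag bits, so when only the flags differ from the expected value a waiter merely raced in and the unlock can be retried. A stand-alone sketch of that classification using the uapi constants; illustration only:

/* Sketch of the -EAGAIN vs -EINVAL decision, using the uapi futex word
 * layout: low 30 bits = owner TID, top bits = WAITERS/OWNER_DIED flags. */
#include <stdint.h>
#include <stdio.h>

#define FUTEX_WAITERS     0x80000000u
#define FUTEX_OWNER_DIED  0x40000000u
#define FUTEX_TID_MASK    0x3fffffffu

static const char *classify(uint32_t expected, uint32_t current)
{
        if (current == expected)
                return "ok";
        if ((current & FUTEX_TID_MASK) == expected)
                return "-EAGAIN: only flag bits changed (a waiter raced in), retry";
        return "-EINVAL: owner TID changed, state is inconsistent";
}

int main(void)
{
        uint32_t tid = 1234;

        printf("%s\n", classify(tid, tid | FUTEX_WAITERS));
        printf("%s\n", classify(tid, 5678));
        return 0;
}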
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
         if (likely(&hb1->chain != &hb2->chain)) {
                 plist_del(&q->list, &hb1->chain);
                 hb_waiters_dec(hb1);
-                plist_add(&q->list, &hb2->chain);
                 hb_waiters_inc(hb2);
+                plist_add(&q->list, &hb2->chain);
                 q->lock_ptr = &hb2->lock;
         }
         get_futex_key_refs(key2);
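The reordering in requeue_futex() matters because a waker may consult the per-bucket waiter count without taking the bucket lock (the hb_waiters_pending() fast path in futex_wake()); raising the counter before the plist_add() guarantees a waiter that is already visible on the list can never be skipped. A rough sketch of that ordering constraint with hypothetical names, not the kernel code:

/* Hypothetical sketch: the waiter count is a lock-free hint for wakers,
 * so it must be raised before the waiter becomes visible on the list. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct bucket {
        atomic_int waiters;     /* checked by wakers without the lock */
        int queued;             /* stand-in for the plist of waiters  */
};

static void requeue(struct bucket *from, struct bucket *to)
{
        atomic_fetch_sub(&from->waiters, 1);
        from->queued--;
        atomic_fetch_add(&to->waiters, 1);  /* 1) announce the waiter...     */
        to->queued++;                       /* 2) ...then put it on the list */
}

static bool wake_would_skip(struct bucket *b)
{
        /* Fast path in the spirit of hb_waiters_pending(): no announced
         * waiters means the waker returns without taking the lock. */
        return atomic_load(&b->waiters) == 0;
}

int main(void)
{
        struct bucket a = { 1, 1 }, b = { 0, 0 };

        requeue(&a, &b);
        printf("waker skips bucket b: %d\n", wake_would_skip(&b));
        return 0;
}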
@@ -2622,6 +2632,15 @@ retry:
          */
         if (ret == -EFAULT)
                 goto pi_faulted;
+        /*
+         * A unconditional UNLOCK_PI op raced against a waiter
+         * setting the FUTEX_WAITERS bit. Try again.
+         */
+        if (ret == -EAGAIN) {
+                spin_unlock(&hb->lock);
+                put_futex_key(&key);
+                goto retry;
+        }
         /*
          * wake_futex_pi has detected invalid state. Tell user
          * space.
kernel/irq/ipi.c

@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
                 data = irq_get_irq_data(virq + i);
                 cpumask_copy(data->common->affinity, dest);
                 data->common->ipi_offset = offset;
+                irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
         }
         return virq;
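Flagging the reserved IPI descriptors with IRQ_NO_BALANCING keeps irqbalance and /proc/irq/*/smp_affinity writes from rewriting an IPI's affinity mask, which for IPIs describes the fixed set of target CPUs rather than a routing preference. A toy model of how such a flag can gate affinity updates; hypothetical names, not the genirq internals:

/* Hypothetical model: an interrupt flagged "no balancing" rejects
 * affinity updates, as IPIs must after irq_reserve_ipi(). */
#include <stdio.h>

#define FLAG_NO_BALANCING  0x1          /* illustrative flag bit */

struct toy_irq_desc {
        unsigned int flags;
        unsigned long affinity;         /* bitmask of target CPUs */
};

static int set_affinity(struct toy_irq_desc *d, unsigned long mask)
{
        if (d->flags & FLAG_NO_BALANCING)
                return -1;              /* affinity is fixed for this IRQ */
        d->affinity = mask;
        return 0;
}

int main(void)
{
        struct toy_irq_desc ipi = { .flags = FLAG_NO_BALANCING, .affinity = 0xf };

        printf("update allowed: %d\n", set_affinity(&ipi, 0x1) == 0);
        return 0;
}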
kernel/locking/qspinlock_stat.h

@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
         }
 
         if (counter == qstat_pv_hash_hops) {
-                u64 frac;
+                u64 frac = 0;
 
-                frac = 100ULL * do_div(stat, kicks);
-                frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+                if (kicks) {
+                        frac = 100ULL * do_div(stat, kicks);
+                        frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+                }
 
                 /*
                  * Return a X.XX decimal number
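The guard added here avoids a division by zero when no kicks have been recorded yet. A user-space sketch of the same guarded X.XX formatting, with plain division standing in for do_div() (which divides in place and returns the remainder) and DIV_ROUND_CLOSEST_ULL() (round to nearest); illustration only:

/* Sketch of the guarded "X.XX" formatting: with kicks == 0 both division
 * steps are skipped, so the fraction simply stays 0. */
#include <inttypes.h>
#include <stdio.h>

static void print_hash_hops(uint64_t stat, uint64_t kicks)
{
        uint64_t frac = 0;

        if (kicks) {
                uint64_t rem = stat % kicks;            /* do_div() remainder */
                stat /= kicks;                          /* do_div() quotient  */
                frac = (100 * rem + kicks / 2) / kicks; /* round to nearest   */
        }
        printf("%" PRIu64 ".%02" PRIu64 "\n", stat, frac);
}

int main(void)
{
        print_hash_hops(355, 100);      /* prints 3.55 */
        print_hash_hops(7, 0);          /* prints 7.00 -- the guarded case */
        return 0;
}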
kernel/sched/core.c

@@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                 return false;
 
         /*
-         * FIFO realtime policy runs the highest priority task (after DEADLINE).
-         * Other runnable tasks are of a lower priority. The scheduler tick
-         * isn't needed.
-         */
-        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-        if (fifo_nr_running)
-                return true;
-
-        /*
-         * Round-robin realtime tasks time slice with other tasks at the same
-         * realtime priority.
+         * If there are more than one RR tasks, we need the tick to effect the
+         * actual RR behaviour.
          */
         if (rq->rt.rr_nr_running) {
                 if (rq->rt.rr_nr_running == 1)
@@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                         return false;
         }
 
-        /* Normal multitasking need periodic preemption checks */
-        if (rq->cfs.nr_running > 1)
+        /*
+         * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
+         * forced preemption between FIFO tasks.
+         */
+        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+        if (fifo_nr_running)
+                return true;
+
+        /*
+         * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
+         * if there's more than one we need the tick for involuntary
+         * preemption.
+         */
+        if (rq->nr_running > 1)
                 return false;
 
         return true;
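Taken together, the two sched/core.c hunks reorder the checks in sched_can_stop_tick(): the RR case is now evaluated before the FIFO shortcut, and the final test looks at rq->nr_running rather than only the CFS count. A compact, simplified model of the resulting decision order (not the kernel function, which works on struct rq):

/* Simplified model of the tick-stop decision after the reordering:
 * DL always needs the tick, RR needs it only when several RR tasks share
 * the CPU, FIFO alone never does, and plain CFS needs it only when more
 * than one task could require involuntary preemption. */
#include <stdbool.h>
#include <stdio.h>

struct rq_counts {
        unsigned int dl_nr_running;     /* SCHED_DEADLINE tasks       */
        unsigned int rt_nr_running;     /* all RT tasks (FIFO + RR)   */
        unsigned int rr_nr_running;     /* SCHED_RR subset            */
        unsigned int nr_running;        /* everything runnable        */
};

static bool can_stop_tick(const struct rq_counts *rq)
{
        unsigned int fifo_nr_running;

        if (rq->dl_nr_running)
                return false;                   /* deadline tasks need the tick */

        if (rq->rr_nr_running)
                return rq->rr_nr_running == 1;  /* >1 RR task: keep round-robin */

        fifo_nr_running = rq->rt_nr_running - rq->rr_nr_running;
        if (fifo_nr_running)
                return true;                    /* FIFO alone is never preempted */

        return rq->nr_running <= 1;             /* >1 CFS task needs preemption  */
}

int main(void)
{
        struct rq_counts two_rr = { .rt_nr_running = 2, .rr_nr_running = 2, .nr_running = 2 };
        struct rq_counts one_fifo = { .rt_nr_running = 1, .nr_running = 1 };

        printf("two RR tasks:  stop=%d\n", can_stop_tick(&two_rr));
        printf("one FIFO task: stop=%d\n", can_stop_tick(&one_fifo));
        return 0;
}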