Merge branch 'linus' into locking/core, to resolve conflict

Conflicts:
	arch/arm/include/asm/percpu.h

As Stephen Rothwell noted, there's a conflict between this commit
in locking/core:

  a21ee6055c ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")

and this fresh upstream commit:

  aa54ea903a ("ARM: percpu.h: fix build error")

a21ee6055c is a simpler solution to the dependency problem and doesn't
further increase header hell - so this conflict resolution effectively
reverts aa54ea903a and uses the a21ee6055c solution.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -1851,7 +1851,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	}
 
 	audit_get_stamp(ab->ctx, &t, &serial);
-	audit_clear_dummy(ab->ctx);
 	audit_log_format(ab, "audit(%llu.%03lu:%u): ",
 			 (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial);
 
@@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t);
 extern void audit_filter_inodes(struct task_struct *tsk,
 				struct audit_context *ctx);
 extern struct list_head *audit_killed_trees(void);
-
-static inline void audit_clear_dummy(struct audit_context *ctx)
-{
-	if (ctx)
-		ctx->dummy = 0;
-}
-
 #else /* CONFIG_AUDITSYSCALL */
 #define auditsc_get_stamp(c, t, s) 0
 #define audit_put_watch(w) {}
@@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t)
 }
 
 #define audit_filter_inodes(t, c) AUDIT_DISABLED
-#define audit_clear_dummy(c) {}
 #endif /* CONFIG_AUDITSYSCALL */
 
 extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
@@ -1417,6 +1417,9 @@ static void audit_log_proctitle(void)
 	struct audit_context *context = audit_context();
 	struct audit_buffer *ab;
 
+	if (!context || context->dummy)
+		return;
+
 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
 	if (!ab)
 		return;	/* audit_panic or being filtered */
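Taken together, the three audit hunks above replace "clear the dummy flag whenever a record is started" with an early bail-out at the logging site: audit_clear_dummy() is deleted and audit_log_proctitle() instead skips dummy contexts outright. A minimal userspace sketch of that guard-clause shape (struct ctx and log_proctitle are hypothetical stand-ins, not kernel API):

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for struct audit_context; only the dummy flag */
struct ctx {
	int dummy;	/* set when filtering marked the event uninteresting */
};

/* the guard-clause shape audit_log_proctitle() gains in this merge:
 * bail out early for dummy contexts instead of clearing the flag
 * every time a record is started */
static void log_proctitle(const struct ctx *context)
{
	if (!context || context->dummy)
		return;
	printf("would emit an AUDIT_PROCTITLE record\n");
}

int main(void)
{
	struct ctx live = { .dummy = 0 }, filtered = { .dummy = 1 };

	log_proctitle(&live);		/* logs */
	log_proctitle(&filtered);	/* skipped */
	log_proctitle(NULL);		/* skipped */
	return 0;
}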
@@ -2199,7 +2199,7 @@ static void handle_swbp(struct pt_regs *regs)
 	if (!uprobe) {
 		if (is_swbp > 0) {
 			/* No matching uprobe; signal SIGTRAP. */
-			send_sig(SIGTRAP, current, 0);
+			force_sig(SIGTRAP);
 		} else {
 			/*
 			 * Either we raced with uprobe_unregister() or we can't
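The uprobes hunk swaps send_sig(SIGTRAP, current, 0) for force_sig(SIGTRAP): a signal sent the ordinary way can be blocked or ignored by the target, while force_sig() first resets a blocked or ignored disposition so the breakpoint trap is reliably delivered, which debuggers such as GDB depend on. A small userspace illustration of the "sent signals can be masked" half, using only standard POSIX calls:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGTRAP);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* target blocks SIGTRAP */

	raise(SIGTRAP);		/* like send_sig(): merely becomes pending */
	printf("still running: an ordinarily sent SIGTRAP was masked\n");

	/*
	 * force_sig(SIGTRAP) in the kernel resets a blocked or ignored
	 * disposition before delivery, so the trap cannot be masked away.
	 */
	return 0;
}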
@@ -4119,9 +4119,6 @@ static void __sched notrace __schedule(bool preempt)
 	local_irq_disable();
 	rcu_note_context_switch(preempt);
 
-	/* See deactivate_task() below. */
-	prev_state = prev->state;
-
 	/*
 	 * Make sure that signal_pending_state()->signal_pending() below
 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
@@ -4145,11 +4142,16 @@ static void __sched notrace __schedule(bool preempt)
 	update_rq_clock(rq);
 
 	switch_count = &prev->nivcsw;
+
 	/*
-	 * We must re-load prev->state in case ttwu_remote() changed it
-	 * before we acquired rq->lock.
+	 * We must load prev->state once (task_struct::state is volatile), such
+	 * that:
+	 *
+	 * - we form a control dependency vs deactivate_task() below.
+	 * - ptrace_{,un}freeze_traced() can change ->state underneath us.
 	 */
-	if (!preempt && prev_state && prev_state == prev->state) {
+	prev_state = prev->state;
+	if (!preempt && prev_state) {
 		if (signal_pending_state(prev_state, prev)) {
 			prev->state = TASK_RUNNING;
 		} else {
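The rewritten comment is about reading prev->state exactly once: the old code compared an early snapshot against a fresh re-load - two loads that a concurrent ttwu() or ptrace could straddle - while the new code takes a single snapshot under rq->lock and bases every later decision (and the control dependency against the on_rq store) on it. A rough userspace analogy with C11 atomics; task_state is hypothetical scaffolding, and this deliberately ignores the locking and barriers the real code relies on:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* task_state stands in for task_struct::state (hypothetical) */
static _Atomic long task_state;

/* shape the hunk removes: an early snapshot compared against a fresh
 * re-load - two loads that a concurrent wakeup can straddle */
static bool should_sleep_reload(long early_snapshot)
{
	return early_snapshot != 0 &&
	       early_snapshot == atomic_load(&task_state);
}

/* shape the hunk adds: load once, then branch only on that snapshot;
 * the branch is what forms the control dependency mentioned above */
static bool should_sleep_once(void)
{
	long snap = atomic_load(&task_state);

	return snap != 0;
}

int main(void)
{
	atomic_store(&task_state, 1);	/* pretend TASK_INTERRUPTIBLE */
	printf("reload: %d, once: %d\n",
	       should_sleep_reload(1), should_sleep_once());
	return 0;
}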
@@ -4163,10 +4165,12 @@ static void __sched notrace __schedule(bool preempt)
 
 			/*
 			 * __schedule()			ttwu()
-			 *   prev_state = prev->state;    if (READ_ONCE(p->on_rq) && ...)
-			 *   LOCK rq->lock		    goto out;
-			 *   smp_mb__after_spinlock();	  smp_acquire__after_ctrl_dep();
-			 *   p->on_rq = 0;		  p->state = TASK_WAKING;
+			 *   prev_state = prev->state;    if (p->on_rq && ...)
+			 *   if (prev_state)		    goto out;
+			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
+			 *				  p->state = TASK_WAKING
 			 *
 			 * Where __schedule() and ttwu() have matching control dependencies.
+			 *
+			 * After this, schedule() must not care about p->state any more.
 			 */
@@ -4481,6 +4485,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
 	return try_to_wake_up(curr->private, mode, wake_flags);
 }
 EXPORT_SYMBOL(default_wake_function);
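The new WARN_ON_ONCE() line uses the usual "AND with the complement of the allowed mask" idiom: wake_flags & ~WF_SYNC is nonzero exactly when unknown flag bits were passed in. A tiny standalone sketch of that check (the WF_SYNC value here is illustrative, not taken from the kernel headers):

#include <stdio.h>

#define WF_SYNC 0x01	/* illustrative value for the only allowed flag */

/* nonzero exactly when bits outside the allowed mask are set */
static unsigned int bad_wake_flags(unsigned int wake_flags)
{
	return wake_flags & ~WF_SYNC;
}

int main(void)
{
	printf("0x01 -> %s\n", bad_wake_flags(0x01) ? "garbage" : "ok");
	printf("0x13 -> %s\n", bad_wake_flags(0x13) ? "garbage" : "ok");
	return 0;
}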
@@ -43,6 +43,7 @@
 #include <linux/sched/debug.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/random.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -1742,6 +1743,13 @@ void update_process_times(int user_tick)
 	scheduler_tick();
 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
 		run_posix_cpu_timers();
+
+	/* The current CPU might make use of net randoms without receiving IRQs
+	 * to renew them often enough. Let's update the net_rand_state from a
+	 * non-constant value that's not affine to the number of calls to make
+	 * sure it's updated when there's some activity (we don't care in idle).
+	 */
+	this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
 }
 
 /**
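The timer hunk feeds a per-tick perturbation into the PRNG state: rol32(jiffies, 24) rotates the tick counter so its fast-changing low bits presumably land in the high bits of the added value, and user_tick nudges it further. A standalone userspace rendition of that arithmetic; the jiffies start value is made up, and rol32 mirrors the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* userspace copy of the kernel's rol32() rotate-left helper */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((32 - shift) & 31));
}

int main(void)
{
	uint32_t jiffies = 0x12345678;	/* made-up tick counter value */

	/* consecutive ticks differ only in the low bits of jiffies, but
	 * after a 24-bit rotation those bits sit at the top, so the value
	 * added to net_rand_state changes by a large amount each tick */
	for (int user_tick = 0; user_tick < 2; user_tick++, jiffies++)
		printf("perturbation = %#010x\n",
		       rol32(jiffies, 24) + (uint32_t)user_tick);
	return 0;
}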