Merge tag 'v3.3-rc7' into sched/core
Merge reason: merge back final fixes, prepare for the merge window.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -651,10 +651,10 @@ int __init init_hw_breakpoint(void)
 
  err_alloc:
         for_each_possible_cpu(err_cpu) {
-                if (err_cpu == cpu)
-                        break;
                 for (i = 0; i < TYPE_MAX; i++)
                         kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+                if (err_cpu == cpu)
+                        break;
         }
 
         return -ENOMEM;
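The hunk above moves the "if (err_cpu == cpu) break;" check below the kfree loop, so the unwind path also frees whatever the failing CPU had already allocated before init_hw_breakpoint() bails out. A minimal user-space sketch of that "free up to and including the failing index" ordering; plain malloc/free and a static array stand in for the per-CPU allocations, nothing here is kernel API:

#include <stdio.h>
#include <stdlib.h>

#define NCPU     4
#define TYPE_MAX 2

/* Zero-initialized, so free(NULL) in the unwind loop is always safe. */
static void *bufs[NCPU][TYPE_MAX];

static int init_all(void)
{
        int cpu, i, err_cpu;

        for (cpu = 0; cpu < NCPU; cpu++)
                for (i = 0; i < TYPE_MAX; i++) {
                        bufs[cpu][i] = malloc(64);
                        if (!bufs[cpu][i])
                                goto err_alloc;
                }
        return 0;

err_alloc:
        /* Free everything allocated so far, *including* the partially
         * initialized failing index, then stop -- the same ordering the
         * fix above gives the hw_breakpoint unwind loop. */
        for (err_cpu = 0; err_cpu < NCPU; err_cpu++) {
                for (i = 0; i < TYPE_MAX; i++) {
                        free(bufs[err_cpu][i]);
                        bufs[err_cpu][i] = NULL;
                }
                if (err_cpu == cpu)
                        break;
        }
        return -1;
}

int main(void)
{
        printf("init_all() -> %d\n", init_all());
        return 0;
}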
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
         return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+        struct completion *vfork;
+
+        task_lock(tsk);
+        vfork = tsk->vfork_done;
+        if (likely(vfork)) {
+                tsk->vfork_done = NULL;
+                complete(vfork);
+        }
+        task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+                               struct completion *vfork)
+{
+        int killed;
+
+        freezer_do_not_count();
+        killed = wait_for_completion_killable(vfork);
+        freezer_count();
+
+        if (killed) {
+                task_lock(child);
+                child->vfork_done = NULL;
+                task_unlock(child);
+        }
+
+        put_task_struct(child);
+        return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
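The two helpers added above split the vfork hand-off: the child side (complete_vfork_done) completes and clears tsk->vfork_done under task_lock(), while the parent side (wait_for_vfork_done) waits killably and, if it was killed, clears child->vfork_done under the same lock so a late child cannot complete a waiter that has already gone away. A rough user-space analogue of that pattern, sketched with pthreads; the mutex stands in for task_lock(), the condition variable for the completion, a timeout for the killable wait, and every name below is illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Illustrative stand-ins, not kernel API. */
struct vfork_done {
        pthread_mutex_t lock;   /* plays the role of task_lock() */
        pthread_cond_t  cond;   /* plays the role of the completion */
        bool            armed;  /* tsk->vfork_done is still set */
        bool            done;   /* child finished its vfork window */
};

/* Child side, mirroring complete_vfork_done(): only signal if the
 * parent has not already given up and disarmed the hand-off. */
static void complete_done(struct vfork_done *v)
{
        pthread_mutex_lock(&v->lock);
        if (v->armed) {
                v->armed = false;
                v->done = true;
                pthread_cond_signal(&v->cond);
        }
        pthread_mutex_unlock(&v->lock);
}

/* Parent side, mirroring wait_for_vfork_done(). A real killable wait
 * returns early on SIGKILL; the sketch substitutes an absolute deadline.
 * Either way, the waiter disarms the hand-off under the lock before
 * leaving, so a late child cannot touch a stale waiter. */
static int wait_done(struct vfork_done *v, const struct timespec *deadline)
{
        int killed = 0;

        pthread_mutex_lock(&v->lock);
        while (!v->done && !killed) {
                if (pthread_cond_timedwait(&v->cond, &v->lock, deadline) == ETIMEDOUT)
                        killed = 1;
        }
        if (killed)
                v->armed = false;       /* child->vfork_done = NULL */
        pthread_mutex_unlock(&v->lock);
        return killed;
}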
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-        struct completion *vfork_done = tsk->vfork_done;
-
         /* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
         if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
         /* Get rid of any cached register state */
         deactivate_mm(tsk, mm);
 
-        /* notify parent sleeping on vfork() */
-        if (vfork_done) {
-                tsk->vfork_done = NULL;
-                complete(vfork_done);
-        }
+        if (tsk->vfork_done)
+                complete_vfork_done(tsk);
 
         /*
          * If we're exiting normally, clear a user-space tid field if
          * requested. We leave this alone when dying by signal, to leave
          * the value intact in a core dump, and to save the unnecessary
-         * trouble otherwise. Userland only wants this done for a sys_exit.
+         * trouble, say, a killed vfork parent shouldn't touch this mm.
+         * Userland only wants this done for a sys_exit.
          */
         if (tsk->clear_child_tid) {
                 if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
         new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
         new_flags |= PF_FORKNOEXEC;
-        new_flags |= PF_STARTING;
         p->flags = new_flags;
 }
 
@@ -1548,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
         if (clone_flags & CLONE_VFORK) {
                 p->vfork_done = &vfork;
                 init_completion(&vfork);
+                get_task_struct(p);
         }
 
-        /*
-         * We set PF_STARTING at creation in case tracing wants to
-         * use this to distinguish a fully live task from one that
-         * hasn't finished SIGSTOP raising yet. Now we clear it
-         * and set the child going.
-         */
-        p->flags &= ~PF_STARTING;
-
         wake_up_new_task(p);
 
         /* forking complete and child started to run, tell ptracer */
@@ -1565,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
                 ptrace_event(trace, nr);
 
                 if (clone_flags & CLONE_VFORK) {
-                        freezer_do_not_count();
-                        wait_for_completion(&vfork);
-                        freezer_count();
-                        ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+                        if (!wait_for_vfork_done(p, &vfork))
+                                ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
                 }
         } else {
                 nr = PTR_ERR(p);
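With wait_for_vfork_done() in place, the CLONE_VFORK wait in do_fork() becomes killable instead of an unconditional wait_for_completion(). The user-visible contract of vfork(2) is unchanged: the parent stays suspended until the child execs or exits. A small stand-alone reminder of that contract, using only standard POSIX calls, nothing specific to this patch:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = vfork();

        if (pid < 0) {
                perror("vfork");
                return 1;
        }
        if (pid == 0) {
                /* Child: shares the parent's memory until it execs or
                 * exits, so it must only exec or _exit() here. */
                execlp("true", "true", (char *)NULL);
                _exit(127);             /* exec failed */
        }

        /* Parent: only runs once the child has exec'ed or exited --
         * the wait the CLONE_VFORK path above implements. */
        printf("child %d released the vfork wait\n", (int)pid);
        waitpid(pid, NULL, 0);
        return 0;
}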
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+        bool can_cont;
+
         get_task_struct(g);
         get_task_struct(t);
         rcu_read_unlock();
         cond_resched();
         rcu_read_lock();
+        can_cont = pid_alive(g) && pid_alive(t);
         put_task_struct(t);
         put_task_struct(g);
+
+        return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
                         goto unlock;
                 if (!--batch_count) {
                         batch_count = HUNG_TASK_BATCHING;
-                        rcu_lock_break(g, t);
-                        /* Exit if t or g was unhashed during refresh. */
-                        if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+                        if (!rcu_lock_break(g, t))
                                 goto unlock;
                 }
                 /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
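Returning bool from rcu_lock_break() lets the walk bail out as soon as either task was unhashed while the RCU read lock was dropped; the removed check assumed a task that is not TASK_DEAD is still on the RCU-protected list, which no longer holds once it has exited, so pid_alive() inside the helper is the reliable test. A compilable sketch of the same "pin, drop the lock, yield, re-lock, report liveness" shape; every name below is an illustrative stand-in (an rwlock for rcu_read_lock(), an atomic refcount for get/put_task_struct(), a live flag for pid_alive()):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-ins, not kernel structures. */
struct obj {
        atomic_int  refs;
        atomic_bool live;
};

static pthread_rwlock_t walk_lock = PTHREAD_RWLOCK_INITIALIZER;

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refs, 1); }
static void obj_put(struct obj *o) { atomic_fetch_sub(&o->refs, 1); }

/* Drop the read lock so other work (here: anything waiting on the write
 * side) can make progress, then tell the caller whether the pinned
 * objects are still live -- the same contract the new rcu_lock_break()
 * gives check_hung_uninterruptible_tasks(). */
static bool lock_break(struct obj *a, struct obj *b)
{
        bool can_cont;

        obj_get(a);
        obj_get(b);
        pthread_rwlock_unlock(&walk_lock);
        sched_yield();                  /* stand-in for cond_resched() */
        pthread_rwlock_rdlock(&walk_lock);
        can_cont = atomic_load(&a->live) && atomic_load(&b->live);
        obj_put(b);
        obj_put(a);

        return can_cont;
}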
@@ -985,6 +985,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                 /* add new interrupt at end of irq queue */
                 do {
+                        /*
+                         * Or all existing action->thread_mask bits,
+                         * so we can find the next zero bit for this
+                         * new action.
+                         */
                         thread_mask |= old->thread_mask;
                         old_ptr = &old->next;
                         old = *old_ptr;
@@ -993,14 +998,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         }
 
         /*
-         * Setup the thread mask for this irqaction. Unlikely to have
-         * 32 resp 64 irqs sharing one line, but who knows.
+         * Setup the thread mask for this irqaction for ONESHOT. For
+         * !ONESHOT irqs the thread mask is 0 so we can avoid a
+         * conditional in irq_wake_thread().
          */
-        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-                ret = -EBUSY;
-                goto out_mask;
+        if (new->flags & IRQF_ONESHOT) {
+                /*
+                 * Unlikely to have 32 resp 64 irqs sharing one line,
+                 * but who knows.
+                 */
+                if (thread_mask == ~0UL) {
+                        ret = -EBUSY;
+                        goto out_mask;
+                }
+                /*
+                 * The thread_mask for the action is or'ed to
+                 * desc->thread_active to indicate that the
+                 * IRQF_ONESHOT thread handler has been woken, but not
+                 * yet finished. The bit is cleared when a thread
+                 * completes. When all threads of a shared interrupt
+                 * line have completed desc->threads_active becomes
+                 * zero and the interrupt line is unmasked. See
+                 * handle.c:irq_wake_thread() for further information.
+                 *
+                 * If no thread is woken by primary (hard irq context)
+                 * interrupt handlers, then desc->threads_active is
+                 * also checked for zero to unmask the irq line in the
+                 * affected hard irq flow handlers
+                 * (handle_[fasteoi|level]_irq).
+                 *
+                 * The new action gets the first zero bit of
+                 * thread_mask assigned. See the loop above which or's
+                 * all existing action->thread_mask bits.
+                 */
+                new->thread_mask = 1 << ffz(thread_mask);
         }
-        new->thread_mask = 1 << ffz(thread_mask);
 
         if (!shared) {
                 init_waitqueue_head(&desc->wait_for_threads);
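The new comment block describes the bookkeeping this hunk sets up: each IRQF_ONESHOT action sharing the line gets a private bit in thread_mask, desc->threads_active collects the bits of woken threaded handlers, and the line stays masked until it drops back to zero. A small self-contained sketch of the bit allocation and tracking; plain structs stand in for irqaction/irq_desc, and ffz() is modelled with a compiler builtin on the complemented mask:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins, not the kernel structures. */
struct action { unsigned long thread_mask; };
struct line   { unsigned long threads_active; };

/* First zero bit of 'mask', i.e. what the kernel's ffz() computes. */
static unsigned int first_zero_bit(unsigned long mask)
{
        return __builtin_ctzl(~mask);
}

/* Mirror of the registration loop above: OR all existing masks, then
 * hand the new action the first bit that is still free. */
static void assign_bit(struct action *acts, int nr_acts, struct action *new_act)
{
        unsigned long thread_mask = 0;
        int i;

        for (i = 0; i < nr_acts; i++)
                thread_mask |= acts[i].thread_mask;
        assert(thread_mask != ~0UL);            /* the -EBUSY case above */
        new_act->thread_mask = 1UL << first_zero_bit(thread_mask);
}

int main(void)
{
        struct line desc = { 0 };
        struct action acts[2] = { { 1UL << 0 }, { 1UL << 1 } }, new_act;

        assign_bit(acts, 2, &new_act);          /* gets bit 2 */

        /* Waking a threaded handler sets its bit; completion clears it.
         * The line would stay masked while threads_active != 0. */
        desc.threads_active |= new_act.thread_mask;
        printf("new bit %#lx, active %#lx\n",
               new_act.thread_mask, desc.threads_active);
        desc.threads_active &= ~new_act.thread_mask;
        printf("after completion, active %#lx\n", desc.threads_active);
        return 0;
}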
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
         if (!kernel_text_address((unsigned long) p->addr) ||
             in_kprobes_functions((unsigned long) p->addr) ||
             ftrace_text_reserved(p->addr, p->addr) ||
-            jump_label_text_reserved(p->addr, p->addr))
-                goto fail_with_jump_label;
+            jump_label_text_reserved(p->addr, p->addr)) {
+                ret = -EINVAL;
+                goto cannot_probe;
+        }
 
         /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
         p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
                  * its code to prohibit unexpected unloading.
                  */
                 if (unlikely(!try_module_get(probed_mod)))
-                        goto fail_with_jump_label;
+                        goto cannot_probe;
 
                 /*
                  * If the module freed .init.text, we couldn't insert
|
||||
if (within_module_init((unsigned long)p->addr, probed_mod) &&
|
||||
probed_mod->state != MODULE_STATE_COMING) {
|
||||
module_put(probed_mod);
|
||||
goto fail_with_jump_label;
|
||||
goto cannot_probe;
|
||||
}
|
||||
/* ret will be updated by following code */
|
||||
}
|
||||
@@ -1409,7 +1411,7 @@ out:
 
         return ret;
 
-fail_with_jump_label:
+cannot_probe:
         preempt_enable();
         jump_label_unlock();
         return ret;
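Besides renaming the error label, the first kprobes hunk sets ret = -EINVAL before jumping to it; on that path ret previously still held 0, so register_kprobe() could report success for an address it had refused to probe. A tiny illustration of that "goto the error label without setting the error code" pattern and the fix; the helper and names are hypothetical, only the control flow mirrors the hunk:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical validity check standing in for the kernel_text_address()/
 * blacklist tests in the hunk above. */
static bool addr_ok(unsigned long addr)
{
        return addr != 0;
}

static int register_probe(unsigned long addr)
{
        int ret = 0;

        if (!addr_ok(addr)) {
                ret = -EINVAL;  /* the line the fix adds: without it,
                                 * 'ret' is still 0 at cannot_probe */
                goto cannot_probe;
        }

        /* ... the real registration work would go here ... */
        return ret;

cannot_probe:
        /* unwind (preempt_enable()/jump_label_unlock() in the kernel) */
        return ret;
}

int main(void)
{
        /* Now reports -EINVAL instead of 0. */
        printf("register_probe(0) = %d\n", register_probe(0));
        return 0;
}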
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1766,6 +1769,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
         unsigned long l1, l2;
         unsigned long flags;
 
+        if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+                return;
+
         /* Theoretically, the log could move on after we do this, but
            there's not a lot we can do about that. The new messages
            will overwrite the start of what we dump. */
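The new always_kmsg_dump parameter gates kmsg_dump(): reasons ordered after KMSG_DUMP_OOPS in the enum (the reboot/halt/poweroff style notifications) are skipped unless the parameter is set, while panic and oops keep dumping unconditionally. Given the S_IWUSR permission above it should also be togglable at runtime via /sys/module/printk/parameters/always_kmsg_dump, or at boot with printk.always_kmsg_dump=1. A compact sketch of the gate; the enum ordering is paraphrased from the companion kmsg_dump.h change and is illustrative, not a copy of the header:

#include <stdbool.h>
#include <stdio.h>

/* Rough, illustrative ordering: everything after OOPS is treated as a
 * "normal shutdown" style notification. */
enum dump_reason {
        DUMP_PANIC,
        DUMP_OOPS,
        DUMP_EMERG,
        DUMP_KEXEC,
        DUMP_RESTART,
        DUMP_HALT,
        DUMP_POWEROFF,
};

static bool always_kmsg_dump;   /* the new module parameter */

static void kmsg_dump(enum dump_reason reason)
{
        /* The added gate: skip non-error reasons unless explicitly asked. */
        if ((reason > DUMP_OOPS) && !always_kmsg_dump)
                return;

        printf("dumping kernel log for reason %d\n", (int)reason);
}

int main(void)
{
        kmsg_dump(DUMP_RESTART);        /* skipped by default */
        kmsg_dump(DUMP_OOPS);           /* always dumped */
        always_kmsg_dump = true;        /* printk.always_kmsg_dump=1 */
        kmsg_dump(DUMP_RESTART);        /* now dumped too */
        return 0;
}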