Merge branch 'perf/urgent' into perf/core
So that we can get the perf bench exec stack fixes and then apply the remaining fix for the files added after what is in perf/urgent.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
diff --git a/kernel/exit.c b/kernel/exit.c
@@ -1038,6 +1038,22 @@ void do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
+
+	/*
+	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+	 * when the following two conditions become true.
+	 *   - There is race condition of mmap_sem (It is acquired by
+	 *     exit_mm()), and
+	 *   - SMI occurs before setting TASK_RUNINNG.
+	 *     (or hypervisor of virtual machine switches to other guest)
+	 *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
+	 *
+	 * To avoid it, we have to wait for releasing tsk->pi_lock which
+	 * is held by try_to_wake_up()
+	 */
+	smp_mb();
+	raw_spin_unlock_wait(&tsk->pi_lock);
+
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
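The added smp_mb()/raw_spin_unlock_wait() pair guards the final tsk->state = TASK_DEAD store against a try_to_wake_up() that is still in flight and holds tsk->pi_lock; without the wait, that waker could overwrite TASK_DEAD with TASK_RUNNING after do_exit() has passed the point of no return. As a rough userspace analogy (a sketch, not the kernel's code; all names below are made up), the "wait for the in-flight lock holder to finish" idiom can be expressed by briefly taking and dropping the lock:

#include <pthread.h>

static pthread_mutex_t waker_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int state;                 /* stand-in for task->state */

void finish_dying(void)
{
        /* Analogue of raw_spin_unlock_wait(&tsk->pi_lock): acquiring and
         * immediately releasing the lock cannot complete until any waker
         * currently holding it (and about to write 'state') has let go. */
        pthread_mutex_lock(&waker_lock);
        pthread_mutex_unlock(&waker_lock);

        state = -1;                        /* stand-in for TASK_DEAD */
}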
diff --git a/kernel/fork.c b/kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
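mm_access() packages the "take cred_guard_mutex, grab the mm, check ptrace permissions" dance that callers such as the /proc/<pid> files need. A hypothetical caller (a sketch; example_read_mm and its surrounding plumbing are not part of this commit) would use it roughly like this:

/* Hypothetical caller of mm_access(); illustrative only. */
static int example_read_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        mm = mm_access(task, PTRACE_MODE_ATTACH);
        if (IS_ERR(mm))
                return PTR_ERR(mm);     /* killed while waiting, or -EACCES */
        if (!mm)
                return -ENOENT;         /* kernel thread or task already exited */

        /* ... read from the address space, e.g. via access_remote_vm() ... */

        mmput(mm);                      /* drop the reference from get_task_mm() */
        return 0;
}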
diff --git a/kernel/power/process.c b/kernel/power/process.c
@@ -188,3 +188,22 @@ void thaw_processes(void)
 	printk("done.\n");
 }
 
+void thaw_kernel_threads(void)
+{
+	struct task_struct *g, *p;
+
+	pm_nosig_freezing = false;
+	printk("Restarting kernel threads ... ");
+
+	thaw_workqueues();
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+			__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
+	schedule();
+	printk("done.\n");
+}
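thaw_kernel_threads() only wakes threads that opted into freezing in the first place. For reference, a freezable kthread typically looks like the sketch below (illustrative, not part of this commit): it clears PF_NOFREEZE with set_freezable() and parks in try_to_freeze() until __thaw_task() lets it run again.

#include <linux/kthread.h>
#include <linux/freezer.h>

static int example_freezable_thread(void *data)
{
        set_freezable();                /* allow the freezer to stop this thread */

        while (!kthread_should_stop()) {
                try_to_freeze();        /* blocks here while the system is frozen;
                                         * __thaw_task() releases it again */
                /* ... do one unit of work, then sleep or wait ... */
        }
        return 0;
}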
diff --git a/kernel/power/user.c b/kernel/power/user.c
@@ -274,6 +274,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		swsusp_free();
 		memset(&data->handle, 0, sizeof(struct snapshot_handle));
 		data->ready = 0;
+		/*
+		 * It is necessary to thaw kernel threads here, because
+		 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
+		 * SNAPSHOT_FREE. In that case, if kernel threads were not
+		 * thawed, the preallocation of memory carried out by
+		 * hibernation_snapshot() might run into problems (i.e. it
+		 * might fail or even deadlock).
+		 */
+		thaw_kernel_threads();
 		break;
 
 	case SNAPSHOT_PREF_IMAGE_SIZE:
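The scenario the comment describes is driven from userspace through /dev/snapshot. A stripped-down sketch of that ioctl sequence (illustrative only; real tools such as s2disk add error handling and image I/O, and all of this needs root):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
        int in_suspend = 0;
        int fd = open("/dev/snapshot", O_RDONLY);

        if (fd < 0)
                return 1;

        ioctl(fd, SNAPSHOT_FREEZE, 0);                  /* freeze tasks */
        ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);  /* first image attempt */
        ioctl(fd, SNAPSHOT_FREE, 0);                    /* discard it ... */
        /* ... and retry immediately: this back-to-back SNAPSHOT_FREE ->
         * SNAPSHOT_CREATE_IMAGE sequence is the case that needs the
         * thaw_kernel_threads() call added above. */
        ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);

        close(fd);
        return 0;
}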
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
@@ -723,9 +724,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +732,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4129,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4142,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
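The two hunks above are the in-kernel leg of sched_setscheduler(2): the task is taken off the runqueue, its class and priority are updated, and it is put back. From userspace the same path is exercised with, for example, this ordinary C program (it needs CAP_SYS_NICE or root to succeed):

#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };

        /* pid 0 means "the calling thread"; inside the kernel this ends up in
         * __sched_setscheduler(), which dequeues the task, applies the new
         * policy, and enqueues it again as in the hunks above. */
        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
                perror("sched_setscheduler");
                return 1;
        }
        printf("running as SCHED_FIFO, priority %d\n", sp.sched_priority);
        return 0;
}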
@@ -4998,9 +4993,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7027,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ -4866,6 +4866,15 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
+static inline void clear_nohz_tick_stopped(int cpu)
+{
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	}
+}
+
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
@@ -4904,6 +4913,12 @@ void select_nohz_load_balancer(int stop_tick)
 {
 	int cpu = smp_processor_id();
 
+	/*
+	 * If this cpu is going down, then nothing needs to be done.
+	 */
+	if (!cpu_active(cpu))
+		return;
+
 	if (stop_tick) {
 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 			return;
@@ -4914,6 +4929,18 @@ void select_nohz_load_balancer(int stop_tick)
 	}
 	return;
 }
+
+static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DYING:
+		clear_nohz_tick_stopped(smp_processor_id());
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5097,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
 	set_cpu_sd_state_busy();
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-		atomic_dec(&nohz.nr_cpus);
-	}
+	clear_nohz_tick_stopped(cpu);
 
 	/*
 	 * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5613,7 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
@@ -1587,6 +1587,11 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	if (unlikely(task_running(rq, next_task)))
+		return 0;
+#endif
+
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
-		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
 		print_modules();
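The only change here is the message's log level: the soft-lockup report is promoted from KERN_ERR to KERN_EMERG, matching the severity of other lockup and panic messages and making it far less likely to be filtered by a lowered console_loglevel. A minimal module-style illustration of the difference (a sketch, not from this commit; the module name is made up):

#include <linux/kernel.h>
#include <linux/module.h>

static int __init loglevel_demo_init(void)
{
        /* KERN_ERR (level 3) can be suppressed on the console if
         * console_loglevel has been lowered; KERN_EMERG (level 0)
         * still reaches the console in practice. */
        printk(KERN_ERR "loglevel demo: error-level message\n");
        printk(KERN_EMERG "loglevel demo: emergency-level message\n");
        return 0;
}

static void __exit loglevel_demo_exit(void) { }

module_init(loglevel_demo_init);
module_exit(loglevel_demo_exit);
MODULE_LICENSE("GPL");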