Merge branch 'akpm' (patches from Andrew)
Merge first patchbomb from Andrew Morton:
 - a few misc things
 - ocfs2 updates
 - kernel/watchdog.c feature work (took ages to get right)
 - most of MM.  A few tricky bits are held up and probably won't make 4.2.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (91 commits)
  mm: kmemleak_alloc_percpu() should follow the gfp from per_alloc()
  mm, thp: respect MPOL_PREFERRED policy with non-local node
  tmpfs: truncate prealloc blocks past i_size
  mm/memory hotplug: print the last vmemmap region at the end of hot add memory
  mm/mmap.c: optimization of do_mmap_pgoff function
  mm: kmemleak: optimise kmemleak_lock acquiring during kmemleak_scan
  mm: kmemleak: avoid deadlock on the kmemleak object insertion error path
  mm: kmemleak: do not acquire scan_mutex in kmemleak_do_cleanup()
  mm: kmemleak: fix delete_object_*() race when called on the same memory block
  mm: kmemleak: allow safe memory scanning during kmemleak disabling
  memcg: convert mem_cgroup->under_oom from atomic_t to int
  memcg: remove unused mem_cgroup->oom_wakeups
  frontswap: allow multiple backends
  x86, mirror: x86 enabling - find mirrored memory ranges
  mm/memblock: allocate boot time data structures from mirrored memory
  mm/memblock: add extra "flags" to memblock to allow selection of memory based on attribute
  mm: do not ignore mapping_gfp_mask in page cache allocation paths
  mm/cma.c: fix typos in comments
  mm/oom_kill.c: print points as unsigned int
  mm/hugetlb: handle races in alloc_huge_page and hugetlb_reserve_pages
  ...
@@ -436,7 +436,7 @@ static void exit_mm(struct task_struct *tsk)
 	mm_update_next_owner(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
-		unmark_oom_victim();
+		exit_oom_victim();
 }
 
 static struct task_struct *find_alive_thread(struct task_struct *p)
@@ -232,7 +232,8 @@ void smpboot_unpark_threads(unsigned int cpu)
 
 	mutex_lock(&smpboot_threads_lock);
 	list_for_each_entry(cur, &hotplug_threads, list)
-		smpboot_unpark_thread(cur, cpu);
+		if (cpumask_test_cpu(cpu, cur->cpumask))
+			smpboot_unpark_thread(cur, cpu);
 	mutex_unlock(&smpboot_threads_lock);
 }
 
@@ -258,6 +259,15 @@ static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
 {
 	unsigned int cpu;
 
+	/* Unpark any threads that were voluntarily parked. */
+	for_each_cpu_not(cpu, ht->cpumask) {
+		if (cpu_online(cpu)) {
+			struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
+			if (tsk)
+				kthread_unpark(tsk);
+		}
+	}
+
 	/* We need to destroy also the parked threads of offline cpus */
 	for_each_possible_cpu(cpu) {
 		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
@@ -281,6 +291,10 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 	unsigned int cpu;
 	int ret = 0;
 
+	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_copy(plug_thread->cpumask, cpu_possible_mask);
+
 	get_online_cpus();
 	mutex_lock(&smpboot_threads_lock);
 	for_each_online_cpu(cpu) {
@@ -313,9 +327,53 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
 	smpboot_destroy_threads(plug_thread);
 	mutex_unlock(&smpboot_threads_lock);
 	put_online_cpus();
+	free_cpumask_var(plug_thread->cpumask);
 }
 EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
 
+/**
+ * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
+ * @plug_thread:	Hotplug thread descriptor
+ * @new:		Revised mask to use
+ *
+ * The cpumask field in the smp_hotplug_thread must not be updated directly
+ * by the client, but only by calling this function.
+ * This function can only be called on a registered smp_hotplug_thread.
+ */
+int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+					 const struct cpumask *new)
+{
+	struct cpumask *old = plug_thread->cpumask;
+	cpumask_var_t tmp;
+	unsigned int cpu;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return -ENOMEM;
+
+	get_online_cpus();
+	mutex_lock(&smpboot_threads_lock);
+
+	/* Park threads that were exclusively enabled on the old mask. */
+	cpumask_andnot(tmp, old, new);
+	for_each_cpu_and(cpu, tmp, cpu_online_mask)
+		smpboot_park_thread(plug_thread, cpu);
+
+	/* Unpark threads that are exclusively enabled on the new mask. */
+	cpumask_andnot(tmp, new, old);
+	for_each_cpu_and(cpu, tmp, cpu_online_mask)
+		smpboot_unpark_thread(plug_thread, cpu);
+
+	cpumask_copy(old, new);
+
+	mutex_unlock(&smpboot_threads_lock);
+	put_online_cpus();
+
+	free_cpumask_var(tmp);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
+
 static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
 
 /*
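As an aside, the intended calling pattern for the new interface mirrors what the watchdog does later in this series: register the per-cpu thread as before, then hand smpboot a revised mask whenever the allowed set of cpus changes. A minimal client sketch follows; it is illustrative only and not part of this commit, and the example_* names (callbacks, per-cpu storage, init function) are hypothetical — only the smpboot_* calls come from the code above.

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, example_task);

/* Hypothetical callbacks: this sketch never asks to run its thread_fn. */
static int example_should_run(unsigned int cpu) { return 0; }
static void example_thread_fn(unsigned int cpu) { }

static struct smp_hotplug_thread example_threads = {
	.store			= &example_task,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "example/%u",
};

static int __init example_init(void)
{
	int err;

	/* Threads start out allowed (and unparked) on every online cpu. */
	err = smpboot_register_percpu_thread(&example_threads);
	if (err)
		return err;

	/*
	 * Shrink the allowed set afterwards: cpus dropped from the mask are
	 * parked, cpus added back are unparked, exactly as the new
	 * smpboot_update_cpumask_percpu_thread() above implements.
	 */
	return smpboot_update_cpumask_percpu_thread(&example_threads,
						    cpumask_of(0));
}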
@@ -871,6 +871,13 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		.procname	= "watchdog_cpumask",
+		.data		= &watchdog_cpumask_bits,
+		.maxlen		= NR_CPUS,
+		.mode		= 0644,
+		.proc_handler	= proc_watchdog_cpumask,
+	},
 	{
 		.procname	= "softlockup_panic",
 		.data		= &softlockup_panic,
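Since this entry lives in kern_table and is handled by proc_do_large_bitmap(), the mask becomes writable through procfs under the standard /proc/sys/kernel/ prefix, i.e. /proc/sys/kernel/watchdog_cpumask, using the usual cpu range-list syntax. A small userspace sketch (illustrative, not part of this commit) that restricts the watchdog to cpus 0-3:

#include <stdio.h>

int main(void)
{
	/* Range-list syntax as parsed by proc_do_large_bitmap(). */
	FILE *f = fopen("/proc/sys/kernel/watchdog_cpumask", "w");

	if (!f) {
		perror("watchdog_cpumask");
		return 1;
	}
	fprintf(f, "0-3\n");
	return fclose(f) != 0;
}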
@@ -19,6 +19,7 @@
 #include <linux/sysctl.h>
 #include <linux/smpboot.h>
 #include <linux/sched/rt.h>
+#include <linux/tick.h>
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
@@ -58,6 +59,12 @@ int __read_mostly sysctl_softlockup_all_cpu_backtrace;
 #else
 #define sysctl_softlockup_all_cpu_backtrace 0
 #endif
+static struct cpumask watchdog_cpumask __read_mostly;
+unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
+
+/* Helper for online, unparked cpus. */
+#define for_each_watchdog_cpu(cpu) \
+	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
 static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
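The new for_each_watchdog_cpu() helper simply intersects the online mask with watchdog_cpumask, so it visits only cpus that are both online and allowed by the mask. A hypothetical helper (not part of this commit) shows the usage; it assumes the macro and watchdog_cpumask are in scope, i.e. it would sit in the same file:

/* Count the cpus the watchdog is currently eligible to run on. */
static unsigned int watchdog_cpu_count(void)
{
	unsigned int cpu, count = 0;

	for_each_watchdog_cpu(cpu)
		count++;
	return count;
}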
@@ -207,7 +214,7 @@ void touch_all_softlockup_watchdogs(void)
 	 * do we care if a 0 races with a timestamp?
 	 * all it means is the softlock check starts one cycle later
 	 */
-	for_each_online_cpu(cpu)
+	for_each_watchdog_cpu(cpu)
 		per_cpu(watchdog_touch_ts, cpu) = 0;
 }
 
@@ -616,7 +623,7 @@ void watchdog_nmi_enable_all(void)
 		goto unlock;
 
 	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_watchdog_cpu(cpu)
 		watchdog_nmi_enable(cpu);
 	put_online_cpus();
 
@@ -634,7 +641,7 @@ void watchdog_nmi_disable_all(void)
 		goto unlock;
 
 	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_watchdog_cpu(cpu)
 		watchdog_nmi_disable(cpu);
 	put_online_cpus();
 
@@ -696,7 +703,7 @@ static void update_watchdog_all_cpus(void)
 	int cpu;
 
 	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_watchdog_cpu(cpu)
 		update_watchdog(cpu);
 	put_online_cpus();
 }
@@ -709,8 +716,12 @@ static int watchdog_enable_all_cpus(void)
 		err = smpboot_register_percpu_thread(&watchdog_threads);
 		if (err)
 			pr_err("Failed to create watchdog threads, disabled\n");
-		else
+		else {
+			if (smpboot_update_cpumask_percpu_thread(
+				    &watchdog_threads, &watchdog_cpumask))
+				pr_err("Failed to set cpumask for watchdog threads\n");
 			watchdog_running = 1;
+		}
 	} else {
 		/*
 		 * Enable/disable the lockup detectors or
@@ -879,12 +890,58 @@ out:
 	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
+
+/*
+ * The cpumask is the mask of possible cpus that the watchdog can run
+ * on, not the mask of cpus it is actually running on.  This allows the
+ * user to specify a mask that will include cpus that have not yet
+ * been brought online, if desired.
+ */
+int proc_watchdog_cpumask(struct ctl_table *table, int write,
+			  void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int err;
+
+	mutex_lock(&watchdog_proc_mutex);
+	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
+	if (!err && write) {
+		/* Remove impossible cpus to keep sysctl output cleaner. */
+		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
+			    cpu_possible_mask);
+
+		if (watchdog_running) {
+			/*
+			 * Failure would be due to being unable to allocate
+			 * a temporary cpumask, so we are likely not in a
+			 * position to do much else to make things better.
+			 */
+			if (smpboot_update_cpumask_percpu_thread(
+				    &watchdog_threads, &watchdog_cpumask) != 0)
+				pr_err("cpumask update failed\n");
+		}
+	}
+	mutex_unlock(&watchdog_proc_mutex);
+	return err;
+}
+
 #endif /* CONFIG_SYSCTL */
 
 void __init lockup_detector_init(void)
 {
 	set_sample_period();
 
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled()) {
+		if (!cpumask_empty(tick_nohz_full_mask))
+			pr_info("Disabling watchdog on nohz_full cores by default\n");
+		cpumask_andnot(&watchdog_cpumask, cpu_possible_mask,
+			       tick_nohz_full_mask);
+	} else
+		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
+#else
+	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
+#endif
+
 	if (watchdog_enabled)
 		watchdog_enable_all_cpus();
 }
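Taken together with the sysctl entry above, the boot-time default can be inspected from userspace: on a machine booted with nohz_full= the mask initially excludes the nohz_full cpus, and the handler later trims any impossible cpus written to it. A read-back sketch (illustrative, not part of this commit; the exact output is whatever range-list format proc_do_large_bitmap() emits):

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/sys/kernel/watchdog_cpumask", "r");

	if (!f) {
		perror("watchdog_cpumask");
		return 1;
	}
	/* E.g. a range list such as "0-1" on an 8-cpu box booted with nohz_full=2-7. */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}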