Merge branch 'akpm' (patches from Andrew)

Merge second patch-bomb from Andrew Morton:

 - a couple of hotfixes

 - the rest of MM

 - a new timer slack control in procfs

 - a couple of procfs fixes

 - a few misc things

 - some printk tweaks

 - lib/ updates, notably to radix-tree.

 - add my and Nick Piggin's old userspace radix-tree test harness to
   tools/testing/radix-tree/.  Matthew said it was a godsend during the
   radix-tree work he did.

 - a few code-size improvements, switching to __always_inline where gcc
   screwed up.

 - partially implement character sets in sscanf (see the example after
   this list)

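The character-set conversions being added use the standard C "%[...]"
syntax. As a rough userspace illustration of the behavior the kernel's
vsscanf() gains (the patch implements only basic sets, so exactly which
forms the kernel accepts is defined by the patch itself, not by this
snippet):

#include <stdio.h>

int main(void)
{
	char word[32], rest[32];
	int n;

	/* %[a-z] consumes a run of lowercase letters; %[^,] consumes
	 * everything up to the next comma. */
	n = sscanf("dmesg,42", "%31[a-z],%31[^,]", word, rest);
	printf("matched %d fields: '%s' and '%s'\n", n, word, rest);
	return 0;
}
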
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (118 commits)
  sscanf: implement basic character sets
  lib/bug.c: use common WARN helper
  param: convert some "on"/"off" users to strtobool
  lib: add "on"/"off" support to kstrtobool
  lib: update single-char callers of strtobool()
  lib: move strtobool() to kstrtobool()
  include/linux/unaligned: force inlining of byteswap operations
  include/uapi/linux/byteorder, swab: force inlining of some byteswap operations
  include/asm-generic/atomic-long.h: force inlining of some atomic_long operations
  usb: common: convert to use match_string() helper
  ide: hpt366: convert to use match_string() helper
  ata: hpt366: convert to use match_string() helper
  power: ab8500: convert to use match_string() helper
  power: charger_manager: convert to use match_string() helper
  drm/edid: convert to use match_string() helper
  pinctrl: convert to use match_string() helper
  device property: convert to use match_string() helper
  lib/string: introduce match_string() helper
  radix-tree tests: add test for radix_tree_iter_next
  radix-tree tests: add regression3 test
  ...
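Several commits above convert drivers to the match_string() helper
introduced by "lib/string: introduce match_string() helper". A userspace
sketch of its semantics, modeled on the kernel's lib/string.c version:
return the index of the first array entry equal to the string, stop early
at a NULL entry, -EINVAL on no match.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int match_string(const char * const *array, size_t n,
			const char *string)
{
	const char *item;
	size_t index;

	for (index = 0; index < n; index++) {
		item = array[index];
		if (!item)
			break;
		if (!strcmp(item, string))
			return (int)index;
	}

	return -EINVAL;
}

int main(void)
{
	static const char * const modes[] = { "host", "peripheral", "otg" };

	printf("otg   -> %d\n", match_string(modes, 3, "otg"));   /* 2 */
	printf("bogus -> %d\n", match_string(modes, 3, "bogus")); /* -EINVAL */
	return 0;
}
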
Author: Linus Torvalds
Date:   2016-03-18 19:26:54 -07:00

 225 files changed, 5048 insertions(+), 1915 deletions(-)

--- a/kernel/fork.c
+++ b/kernel/fork.c

@@ -164,12 +164,20 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
 						  THREAD_SIZE_ORDER);
 
+	if (page)
+		memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
+					    1 << THREAD_SIZE_ORDER);
+
 	return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
 {
-	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+	struct page *page = virt_to_page(ti);
+
+	memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
+				    -(1 << THREAD_SIZE_ORDER));
+	__free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_info_cache;

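The kernel/fork.c hunk pairs every thread-stack allocation with a positive
MEMCG_KERNEL_STACK update and every free with the same amount negated, so
the per-cgroup stat always balances. A toy model of that invariant
(illustrative stand-in names, not kernel API):

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE_ORDER 2	/* assumption: a stack is 1 << 2 = 4 pages */

static long kernel_stack_pages;	/* stand-in for the memcg page stat */

static void account_stack_alloc(void)
{
	kernel_stack_pages += 1 << THREAD_SIZE_ORDER;
}

static void account_stack_free(void)
{
	kernel_stack_pages += -(1 << THREAD_SIZE_ORDER);
}

int main(void)
{
	account_stack_alloc();
	account_stack_free();
	assert(kernel_stack_pages == 0);	/* balanced, as in the patch */
	printf("stat balanced at %ld pages\n", kernel_stack_pages);
	return 0;
}
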
--- a/kernel/panic.c
+++ b/kernel/panic.c

@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/nmi.h>
 #include <linux/console.h>
+#include <linux/bug.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -449,20 +450,25 @@ void oops_exit(void)
 	kmsg_dump(KMSG_DUMP_OOPS);
 }
 
-#ifdef WANT_WARN_ON_SLOWPATH
-struct slowpath_args {
+struct warn_args {
 	const char *fmt;
 	va_list args;
 };
 
-static void warn_slowpath_common(const char *file, int line, void *caller,
-				 unsigned taint, struct slowpath_args *args)
+void __warn(const char *file, int line, void *caller, unsigned taint,
+	    struct pt_regs *regs, struct warn_args *args)
 {
 	disable_trace_on_warning();
 
 	pr_warn("------------[ cut here ]------------\n");
-	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
-		raw_smp_processor_id(), current->pid, file, line, caller);
+
+	if (file)
+		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
+			raw_smp_processor_id(), current->pid, file, line,
+			caller);
+	else
+		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
+			raw_smp_processor_id(), current->pid, caller);
 
 	if (args)
 		vprintk(args->fmt, args->args);
@@ -479,20 +485,27 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
 	}
 
 	print_modules();
-	dump_stack();
+
+	if (regs)
+		show_regs(regs);
+	else
+		dump_stack();
+
 	print_oops_end_marker();
+
 	/* Just a warning, don't kill lockdep. */
 	add_taint(taint, LOCKDEP_STILL_OK);
 }
 
+#ifdef WANT_WARN_ON_SLOWPATH
 void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
 {
-	struct slowpath_args args;
+	struct warn_args args;
 
 	args.fmt = fmt;
 	va_start(args.args, fmt);
-	warn_slowpath_common(file, line, __builtin_return_address(0),
-			     TAINT_WARN, &args);
+	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
+	       &args);
 	va_end(args.args);
 }
 EXPORT_SYMBOL(warn_slowpath_fmt);
@@ -500,20 +513,18 @@ EXPORT_SYMBOL(warn_slowpath_fmt);
 void warn_slowpath_fmt_taint(const char *file, int line,
 			     unsigned taint, const char *fmt, ...)
 {
-	struct slowpath_args args;
+	struct warn_args args;
 
 	args.fmt = fmt;
 	va_start(args.args, fmt);
-	warn_slowpath_common(file, line, __builtin_return_address(0),
-			     taint, &args);
+	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
 	va_end(args.args);
 }
 EXPORT_SYMBOL(warn_slowpath_fmt_taint);
 
 void warn_slowpath_null(const char *file, int line)
 {
-	warn_slowpath_common(file, line, __builtin_return_address(0),
-			     TAINT_WARN, NULL);
+	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
 }
 EXPORT_SYMBOL(warn_slowpath_null);
 #endif

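The kernel/panic.c change funnels all warn_slowpath_*() variants into one
__warn() that can also take a struct pt_regs. The C idiom doing the work
is capturing varargs in a struct so a single common function can consume
them; a self-contained userspace sketch of that pattern (stand-in names,
not the kernel's symbols):

#include <stdarg.h>
#include <stdio.h>

struct warn_args {
	const char *fmt;
	va_list args;
};

static void common_warn(const char *file, int line, struct warn_args *args)
{
	fprintf(stderr, "WARNING at %s:%d: ", file, line);
	if (args)
		vfprintf(stderr, args->fmt, args->args);
	fputc('\n', stderr);
}

static void warn_fmt(const char *file, int line, const char *fmt, ...)
{
	struct warn_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	common_warn(file, line, &args);
	va_end(args.args);
}

int main(void)
{
	warn_fmt(__FILE__, __LINE__, "bad value %d", 42);
	return 0;
}
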
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c

@@ -367,16 +367,20 @@ static int logbuf_has_space(u32 msg_size, bool empty)
 static int log_make_free_space(u32 msg_size)
 {
-	while (log_first_seq < log_next_seq) {
-		if (logbuf_has_space(msg_size, false))
-			return 0;
+	while (log_first_seq < log_next_seq &&
+	       !logbuf_has_space(msg_size, false)) {
 		/* drop old messages until we have enough contiguous space */
 		log_first_idx = log_next(log_first_idx);
 		log_first_seq++;
 	}
 
+	if (clear_seq < log_first_seq) {
+		clear_seq = log_first_seq;
+		clear_idx = log_first_idx;
+	}
+
 	/* sequence numbers are equal, so the log buffer is empty */
-	if (logbuf_has_space(msg_size, true))
+	if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
 		return 0;
 
 	return -ENOMEM;
 }
@@ -854,6 +858,7 @@ void log_buf_kexec_setup(void)
 	VMCOREINFO_SYMBOL(log_buf);
 	VMCOREINFO_SYMBOL(log_buf_len);
 	VMCOREINFO_SYMBOL(log_first_idx);
+	VMCOREINFO_SYMBOL(clear_idx);
 	VMCOREINFO_SYMBOL(log_next_idx);
 	/*
 	 * Export struct printk_log size and field offsets. User space tools can
@@ -1216,12 +1221,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		u32 idx;
 		enum log_flags prev;
 
-		if (clear_seq < log_first_seq) {
-			/* messages are gone, move to first available one */
-			clear_seq = log_first_seq;
-			clear_idx = log_first_idx;
-		}
-
 		/*
 		 * Find first record that fits, including all following records,
 		 * into the user-provided buffer for this dump.
@@ -1483,58 +1482,6 @@ static void zap_locks(void)
 	sema_init(&console_sem, 1);
 }
 
-/*
- * Check if we have any console that is capable of printing while cpu is
- * booting or shutting down. Requires console_sem.
- */
-static int have_callable_console(void)
-{
-	struct console *con;
-
-	for_each_console(con)
-		if (con->flags & CON_ANYTIME)
-			return 1;
-
-	return 0;
-}
-
-/*
- * Can we actually use the console at this time on this cpu?
- *
- * Console drivers may assume that per-cpu resources have been allocated. So
- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
- * call them until this CPU is officially up.
- */
-static inline int can_use_console(unsigned int cpu)
-{
-	return cpu_online(cpu) || have_callable_console();
-}
-
-/*
- * Try to get console ownership to actually show the kernel
- * messages from a 'printk'. Return true (and with the
- * console_lock held, and 'console_locked' set) if it
- * is successful, false otherwise.
- */
-static int console_trylock_for_printk(void)
-{
-	unsigned int cpu = smp_processor_id();
-
-	if (!console_trylock())
-		return 0;
-
-	/*
-	 * If we can't use the console, we need to release the console
-	 * semaphore by hand to avoid flushing the buffer. We need to hold the
-	 * console semaphore in order to do this test safely.
-	 */
-	if (!can_use_console(cpu)) {
-		console_locked = 0;
-		up_console_sem();
-		return 0;
-	}
-
-	return 1;
-}
-
 int printk_delay_msec __read_mostly;
 
 static inline void printk_delay(void)
@@ -1681,7 +1628,6 @@ asmlinkage int vprintk_emit(int facility, int level,
 	boot_delay_msec(level);
 	printk_delay();
 
-	/* This stops the holder of console_sem just where we want him */
 	local_irq_save(flags);
 	this_cpu = smp_processor_id();
@@ -1705,6 +1651,7 @@ asmlinkage int vprintk_emit(int facility, int level,
 	}
 
 	lockdep_off();
+	/* This stops the holder of console_sem just where we want him */
 	raw_spin_lock(&logbuf_lock);
 	logbuf_cpu = this_cpu;
@@ -1809,21 +1756,13 @@ asmlinkage int vprintk_emit(int facility, int level,
 	/* If called from the scheduler, we can not call up(). */
 	if (!in_sched) {
 		lockdep_off();
-		/*
-		 * Disable preemption to avoid being preempted while holding
-		 * console_sem which would prevent anyone from printing to
-		 * console
-		 */
-		preempt_disable();
 		/*
 		 * Try to acquire and then immediately release the console
 		 * semaphore. The release will print out buffers and wake up
 		 * /dev/kmsg and syslog() users.
 		 */
-		if (console_trylock_for_printk())
+		if (console_trylock())
 			console_unlock();
-		preempt_enable();
 		lockdep_on();
 	}
@@ -2174,7 +2113,20 @@ int console_trylock(void)
 		return 0;
 	}
 	console_locked = 1;
-	console_may_schedule = 0;
+	/*
+	 * When PREEMPT_COUNT disabled we can't reliably detect if it's
+	 * safe to schedule (e.g. calling printk while holding a spin_lock),
+	 * because preempt_disable()/preempt_enable() are just barriers there
+	 * and preempt_count() is always 0.
+	 *
+	 * RCU read sections have a separate preemption counter when
+	 * PREEMPT_RCU enabled thus we must take extra care and check
+	 * rcu_preempt_depth(), otherwise RCU read sections modify
+	 * preempt_count().
+	 */
+	console_may_schedule = !oops_in_progress &&
+			preemptible() &&
+			!rcu_preempt_depth();
 	return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2184,6 +2136,34 @@ int is_console_locked(void)
 	return console_locked;
 }
 
+/*
+ * Check if we have any console that is capable of printing while cpu is
+ * booting or shutting down. Requires console_sem.
+ */
+static int have_callable_console(void)
+{
+	struct console *con;
+
+	for_each_console(con)
+		if ((con->flags & CON_ENABLED) &&
+				(con->flags & CON_ANYTIME))
+			return 1;
+
+	return 0;
+}
+
+/*
+ * Can we actually use the console at this time on this cpu?
+ *
+ * Console drivers may assume that per-cpu resources have been allocated. So
+ * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+ * call them until this CPU is officially up.
+ */
+static inline int can_use_console(void)
+{
+	return cpu_online(raw_smp_processor_id()) || have_callable_console();
+}
+
 static void console_cont_flush(char *text, size_t size)
 {
 	unsigned long flags;
@@ -2254,9 +2234,21 @@ void console_unlock(void)
 	do_cond_resched = console_may_schedule;
 	console_may_schedule = 0;
 
+again:
+	/*
+	 * We released the console_sem lock, so we need to recheck if
+	 * cpu is online and (if not) is there at least one CON_ANYTIME
+	 * console.
+	 */
+	if (!can_use_console()) {
+		console_locked = 0;
+		up_console_sem();
+		return;
+	}
+
 	/* flush buffered message fragment immediately to console */
 	console_cont_flush(text, sizeof(text));
-again:
+
 	for (;;) {
 		struct printk_log *msg;
 		size_t ext_len = 0;

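Net effect of the printk changes: vprintk_emit() is back to a plain
console_trylock()/console_unlock() pair, and the cpu-online/CON_ANYTIME
check moves into console_unlock(), where it is re-evaluated on every pass
through the again: loop. A single-threaded toy model of the "whoever
acquires the lock flushes on release" design (pthreads standing in for
console_sem; purely illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t console_sem = PTHREAD_MUTEX_INITIALIZER;
static char logbuf[256];

static void console_unlock_flush(void)
{
	/* Print whatever accumulated while we owned the lock, then drop it. */
	if (logbuf[0]) {
		fputs(logbuf, stdout);
		logbuf[0] = '\0';
	}
	pthread_mutex_unlock(&console_sem);
}

static void emit(const char *msg)
{
	strncat(logbuf, msg, sizeof(logbuf) - strlen(logbuf) - 1);

	/* Acquire and immediately release: the release does the flushing,
	 * mirroring the console_trylock()/console_unlock() pairing above. */
	if (pthread_mutex_trylock(&console_sem) == 0)
		console_unlock_flush();
}

int main(void)
{
	emit("line one\n");
	emit("line two\n");
	return 0;
}
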
--- a/kernel/sys.c
+++ b/kernel/sys.c

@@ -2169,7 +2169,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		error = perf_event_task_enable();
 		break;
 	case PR_GET_TIMERSLACK:
-		error = current->timer_slack_ns;
+		if (current->timer_slack_ns > ULONG_MAX)
+			error = ULONG_MAX;
+		else
+			error = current->timer_slack_ns;
 		break;
 	case PR_SET_TIMERSLACK:
 		if (arg2 <= 0)

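From userspace this knob is the PR_SET_TIMERSLACK/PR_GET_TIMERSLACK prctl
pair (the new procfs file added elsewhere in this series exposes the same
value per task). A minimal usage example; note PR_GET_TIMERSLACK reports
through prctl()'s return value:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	long slack;

	if (prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0) != 0)
		perror("PR_SET_TIMERSLACK");

	/* After this series the kernel stores the slack as a u64 and
	 * clamps what it reports here to ULONG_MAX. */
	slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);
	printf("timer slack: %ld ns\n", slack);
	return 0;
}
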
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c

@@ -126,6 +126,7 @@ static int __maybe_unused two = 2;
 static int __maybe_unused four = 4;
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
+static int one_thousand = 1000;
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
 #endif
@@ -1403,6 +1404,15 @@ static struct ctl_table vm_table[] = {
 		.proc_handler	= min_free_kbytes_sysctl_handler,
 		.extra1		= &zero,
 	},
+	{
+		.procname	= "watermark_scale_factor",
+		.data		= &watermark_scale_factor,
+		.maxlen		= sizeof(watermark_scale_factor),
+		.mode		= 0644,
+		.proc_handler	= watermark_scale_factor_sysctl_handler,
+		.extra1		= &one,
+		.extra2		= &one_thousand,
+	},
 	{
 		.procname	= "percpu_pagelist_fraction",
 		.data		= &percpu_pagelist_fraction,

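The new knob surfaces as /proc/sys/vm/watermark_scale_factor, clamped to
1..1000 by the table entry above; per the mm patch in this series the unit
is fractions of 10,000 of zone memory, so the default of 10 means 0.1%. A
minimal reader, assuming that proc path:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/watermark_scale_factor", "r");
	int val;

	if (!f) {
		perror("watermark_scale_factor");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("watermark_scale_factor = %d (%.2f%% of zone memory)\n",
		       val, val / 100.0);
	fclose(f);
	return 0;
}
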
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c

@@ -515,7 +515,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 /*
  * High resolution timer enabled ?
  */
-static int hrtimer_hres_enabled __read_mostly = 1;
+static bool hrtimer_hres_enabled __read_mostly = true;
 unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
 EXPORT_SYMBOL_GPL(hrtimer_resolution);
@@ -524,13 +524,7 @@ EXPORT_SYMBOL_GPL(hrtimer_resolution);
  */
 static int __init setup_hrtimer_hres(char *str)
 {
-	if (!strcmp(str, "off"))
-		hrtimer_hres_enabled = 0;
-	else if (!strcmp(str, "on"))
-		hrtimer_hres_enabled = 1;
-	else
-		return 0;
-	return 1;
+	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
 }
 
 __setup("highres=", setup_hrtimer_hres);
@@ -979,7 +973,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
  *  relative (HRTIMER_MODE_REL)
  */
 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-			    unsigned long delta_ns, const enum hrtimer_mode mode)
+			    u64 delta_ns, const enum hrtimer_mode mode)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
@@ -1548,7 +1542,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	struct restart_block *restart;
 	struct hrtimer_sleeper t;
 	int ret = 0;
-	unsigned long slack;
+	u64 slack;
 
 	slack = current->timer_slack_ns;
 	if (dl_task(current) || rt_task(current))
@@ -1724,7 +1718,7 @@ void __init hrtimers_init(void)
  * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
  */
 int __sched
-schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 			       const enum hrtimer_mode mode, int clock)
 {
 	struct hrtimer_sleeper t;
@@ -1792,7 +1786,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
  *
  * Returns 0 when the timer has expired otherwise -EINTR
  */
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
 				     const enum hrtimer_mode mode)
 {
 	return schedule_hrtimeout_range_clock(expires, delta, mode,

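setup_hrtimer_hres() now leans on kstrtobool(), which after "lib: add
"on"/"off" support to kstrtobool" accepts on/off in addition to the y/Y/1
and n/N/0 prefixes, returning 0 on success. A userspace re-implementation
sketch of those semantics (the kernel helper itself lands in
lib/kstrtox.c):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int my_kstrtobool(const char *s, bool *res)
{
	if (!s)
		return -EINVAL;

	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	case 'o': case 'O':	/* "on"/"off", added by this series */
		switch (s[1]) {
		case 'n': case 'N':
			*res = true;
			return 0;
		case 'f': case 'F':
			*res = false;
			return 0;
		}
		break;
	}

	return -EINVAL;
}

int main(void)
{
	static const char *tests[] = { "on", "off", "y", "0", "bogus" };
	bool val;

	for (int i = 0; i < 5; i++) {
		int rc = my_kstrtobool(tests[i], &val);
		printf("%-5s -> rc=%d val=%d\n", tests[i], rc, rc ? 0 : val);
	}
	return 0;
}
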
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c

@@ -486,20 +486,14 @@ void __init tick_nohz_init(void)
 /*
  * NO HZ enabled ?
  */
-int tick_nohz_enabled __read_mostly = 1;
+bool tick_nohz_enabled __read_mostly = true;
 unsigned long tick_nohz_active  __read_mostly;
 
 /*
  * Enable / Disable tickless mode
  */
 static int __init setup_tick_nohz(char *str)
 {
-	if (!strcmp(str, "off"))
-		tick_nohz_enabled = 0;
-	else if (!strcmp(str, "on"))
-		tick_nohz_enabled = 1;
-	else
-		return 0;
-	return 1;
+	return (kstrtobool(str, &tick_nohz_enabled) == 0);
 }
 
 __setup("nohz=", setup_tick_nohz);

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c

@@ -1698,10 +1698,10 @@ EXPORT_SYMBOL(msleep_interruptible);
 static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
 	ktime_t kmin;
-	unsigned long delta;
+	u64 delta;
 
 	kmin = ktime_set(0, min * NSEC_PER_USEC);
-	delta = (max - min) * NSEC_PER_USEC;
+	delta = (u64)(max - min) * NSEC_PER_USEC;
 	schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }

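The (u64) cast is the point of the do_usleep_range() hunk: on a 32-bit
kernel, unsigned long math wraps once (max - min) * NSEC_PER_USEC exceeds
2^32, i.e. for ranges beyond roughly 4.29 seconds. A small demonstration,
with uint32_t standing in for a 32-bit unsigned long:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000u

int main(void)
{
	uint32_t min = 0, max = 5 * 1000 * 1000;	/* a 5 s range, in usec */

	uint32_t narrow = (max - min) * NSEC_PER_USEC;	/* 32-bit multiply: wraps */
	uint64_t wide = (uint64_t)(max - min) * NSEC_PER_USEC;	/* what the patch does */

	printf("32-bit delta: %" PRIu32 " ns (wrapped)\n", narrow);
	printf("64-bit delta: %" PRIu64 " ns\n", wide);
	return 0;
}
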
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c

@@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
 		 * both lockup detectors are disabled if proc_watchdog_update()
 		 * returns an error.
 		 */
+		if (old == new)
+			goto out;
+
 		err = proc_watchdog_update();
 	}
 out:
@@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
 int proc_watchdog_thresh(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int err, old;
+	int err, old, new;
 
 	get_online_cpus();
 	mutex_lock(&watchdog_proc_mutex);
@@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
 	/*
 	 * Update the sample period. Restore on failure.
 	 */
+	new = ACCESS_ONCE(watchdog_thresh);
+	if (old == new)
+		goto out;
+
 	set_sample_period();
 	err = proc_watchdog_update();
 	if (err) {