Merge branch 'akpm' (patchbomb from Andrew) into next
Merge misc updates from Andrew Morton:

 - a few fixes for 3.16. Cc'ed to stable so they'll get there somehow.

 - various misc fixes and cleanups

 - most of the ocfs2 queue. Review is slow...

 - most of MM. The MM queue is pretty huge this time, but not much in
   the way of feature work.

 - some tweaks under kernel/

 - printk maintenance work

 - updates to lib/

 - checkpatch updates

 - tweaks to init/

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (276 commits)
  fs/autofs4/dev-ioctl.c: add __init to autofs_dev_ioctl_init
  fs/ncpfs/getopt.c: replace simple_strtoul by kstrtoul
  init/main.c: remove an ifdef
  kthreads: kill CLONE_KERNEL, change kernel_thread(kernel_init) to avoid CLONE_SIGHAND
  init/main.c: add initcall_blacklist kernel parameter
  init/main.c: don't use pr_debug()
  fs/binfmt_flat.c: make old_reloc() static
  fs/binfmt_elf.c: fix bool assignements
  fs/efs: convert printk(KERN_DEBUG to pr_debug
  fs/efs: add pr_fmt / use __func__
  fs/efs: convert printk to pr_foo()
  scripts/checkpatch.pl: device_initcall is not the only __initcall substitute
  checkpatch: check stable email address
  checkpatch: warn on unnecessary void function return statements
  checkpatch: prefer kstrto<foo> to sscanf(buf, "%<lhuidx>", &bar);
  checkpatch: add warning for kmalloc/kzalloc with multiply
  checkpatch: warn on #defines ending in semicolon
  checkpatch: make --strict a default for files in drivers/net and net/
  checkpatch: always warn on missing blank line after variable declaration block
  checkpatch: fix wildcard DT compatible string checking
  ...
kernel/backtracetest.c
@@ -19,8 +19,8 @@
 
 static void backtrace_test_normal(void)
 {
-	printk("Testing a backtrace from process context.\n");
-	printk("The following trace is a kernel self test and not a bug!\n");
+	pr_info("Testing a backtrace from process context.\n");
+	pr_info("The following trace is a kernel self test and not a bug!\n");
 
 	dump_stack();
 }
@@ -37,8 +37,8 @@ static DECLARE_TASKLET(backtrace_tasklet, &backtrace_test_irq_callback, 0);
 
 static void backtrace_test_irq(void)
 {
-	printk("Testing a backtrace from irq context.\n");
-	printk("The following trace is a kernel self test and not a bug!\n");
+	pr_info("Testing a backtrace from irq context.\n");
+	pr_info("The following trace is a kernel self test and not a bug!\n");
 
 	init_completion(&backtrace_work);
 	tasklet_schedule(&backtrace_tasklet);
@@ -51,8 +51,8 @@ static void backtrace_test_saved(void)
 	struct stack_trace trace;
 	unsigned long entries[8];
 
-	printk("Testing a saved backtrace.\n");
-	printk("The following trace is a kernel self test and not a bug!\n");
+	pr_info("Testing a saved backtrace.\n");
+	pr_info("The following trace is a kernel self test and not a bug!\n");
 
 	trace.nr_entries = 0;
 	trace.max_entries = ARRAY_SIZE(entries);
@@ -65,19 +65,19 @@ static void backtrace_test_saved(void)
 #else
 static void backtrace_test_saved(void)
 {
-	printk("Saved backtrace test skipped.\n");
+	pr_info("Saved backtrace test skipped.\n");
 }
 #endif
 
 static int backtrace_regression_test(void)
 {
-	printk("====[ backtrace testing ]===========\n");
+	pr_info("====[ backtrace testing ]===========\n");
 
 	backtrace_test_normal();
 	backtrace_test_irq();
 	backtrace_test_saved();
 
-	printk("====[ end of backtrace testing ]====\n");
+	pr_info("====[ end of backtrace testing ]====\n");
 	return 0;
 }
kernel/capability.c
@@ -24,7 +24,6 @@
  */
 
 const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET;
-
 EXPORT_SYMBOL(__cap_empty_set);
 
 int file_caps_enabled = 1;
@@ -189,7 +188,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
 		 *
 		 * An alternative would be to return an error here
 		 * (-ERANGE), but that causes legacy applications to
-		 * unexpectidly fail; the capget/modify/capset aborts
+		 * unexpectedly fail; the capget/modify/capset aborts
 		 * before modification is attempted and the application
 		 * fails.
 		 */
@@ -395,7 +394,8 @@ EXPORT_SYMBOL(ns_capable);
 * This does not set PF_SUPERPRIV because the caller may not
 * actually be privileged.
 */
-bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
+bool file_ns_capable(const struct file *file, struct user_namespace *ns,
+		     int cap)
 {
 	if (WARN_ON_ONCE(!cap_valid(cap)))
 		return false;
kernel/compat.c
@@ -157,7 +157,7 @@ static int __compat_put_timespec(const struct timespec *ts, struct compat_timesp
 int compat_get_timeval(struct timeval *tv, const void __user *utv)
 {
 	if (COMPAT_USE_64BIT_TIME)
-		return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0;
+		return copy_from_user(tv, utv, sizeof(*tv)) ? -EFAULT : 0;
 	else
 		return __compat_get_timeval(tv, utv);
 }
@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(compat_get_timeval);
 int compat_put_timeval(const struct timeval *tv, void __user *utv)
 {
 	if (COMPAT_USE_64BIT_TIME)
-		return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0;
+		return copy_to_user(utv, tv, sizeof(*tv)) ? -EFAULT : 0;
 	else
 		return __compat_put_timeval(tv, utv);
 }
@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(compat_put_timeval);
 int compat_get_timespec(struct timespec *ts, const void __user *uts)
 {
 	if (COMPAT_USE_64BIT_TIME)
-		return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0;
+		return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
 	else
 		return __compat_get_timespec(ts, uts);
 }
@@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(compat_get_timespec);
 int compat_put_timespec(const struct timespec *ts, void __user *uts)
 {
 	if (COMPAT_USE_64BIT_TIME)
-		return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0;
+		return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
 	else
 		return __compat_put_timespec(ts, uts);
 }
kernel/cpu.c
@@ -283,8 +283,7 @@ static inline void check_for_tasks(int cpu)
 		task_cputime(p, &utime, &stime);
 		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
 		    (utime || stime))
-			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
-				"(state = %ld, flags = %x)\n",
+			pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
 				p->comm, task_pid_nr(p), cpu,
 				p->state, p->flags);
 	}
@@ -336,8 +335,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	if (err) {
 		nr_calls--;
 		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
-		printk("%s: attempt to take down CPU %u failed\n",
-				__func__, cpu);
+		pr_warn("%s: attempt to take down CPU %u failed\n",
+			__func__, cpu);
 		goto out_release;
 	}
 
@@ -444,8 +443,8 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (ret) {
 		nr_calls--;
-		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
-				__func__, cpu);
+		pr_warn("%s: attempt to bring up CPU %u failed\n",
+			__func__, cpu);
 		goto out_notify;
 	}
 
@@ -475,11 +474,10 @@ int cpu_up(unsigned int cpu)
 	int err = 0;
 
 	if (!cpu_possible(cpu)) {
-		printk(KERN_ERR "can't online cpu %d because it is not "
-			"configured as may-hotadd at boot time\n", cpu);
+		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
+		       cpu);
 #if defined(CONFIG_IA64)
-		printk(KERN_ERR "please check additional_cpus= boot "
-				"parameter\n");
+		pr_err("please check additional_cpus= boot parameter\n");
 #endif
 		return -EINVAL;
 	}
@@ -518,7 +516,7 @@ int disable_nonboot_cpus(void)
 	 */
 	cpumask_clear(frozen_cpus);
 
-	printk("Disabling non-boot CPUs ...\n");
+	pr_info("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
 			continue;
@@ -526,8 +524,7 @@ int disable_nonboot_cpus(void)
 		if (!error)
 			cpumask_set_cpu(cpu, frozen_cpus);
 		else {
-			printk(KERN_ERR "Error taking CPU%d down: %d\n",
-				cpu, error);
+			pr_err("Error taking CPU%d down: %d\n", cpu, error);
 			break;
 		}
 	}
@@ -537,7 +534,7 @@ int disable_nonboot_cpus(void)
 		/* Make sure the CPUs won't be enabled by someone else */
 		cpu_hotplug_disabled = 1;
 	} else {
-		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
+		pr_err("Non-boot CPUs are not disabled\n");
 	}
 	cpu_maps_update_done();
 	return error;
@@ -561,17 +558,17 @@ void __ref enable_nonboot_cpus(void)
 	if (cpumask_empty(frozen_cpus))
 		goto out;
 
-	printk(KERN_INFO "Enabling non-boot CPUs ...\n");
+	pr_info("Enabling non-boot CPUs ...\n");
 
 	arch_enable_nonboot_cpus_begin();
 
 	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
-			printk(KERN_INFO "CPU%d is up\n", cpu);
+			pr_info("CPU%d is up\n", cpu);
 			continue;
 		}
-		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
+		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
 	}
 
 	arch_enable_nonboot_cpus_end();
kernel/cpuset.c
@@ -61,12 +61,7 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
-/*
- * Tracks how many cpusets are currently defined in system.
- * When there is only one cpuset (the root cpuset) we can
- * short circuit some hooks.
- */
-int number_of_cpusets __read_mostly;
+struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
 
 /* See "Frequency meter" comments, below. */
 
@@ -611,7 +606,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
 		goto done;
 	}
 
-	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
+	csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
 	if (!csa)
 		goto done;
 	csn = 0;
@@ -1888,7 +1883,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	if (is_spread_slab(parent))
 		set_bit(CS_SPREAD_SLAB, &cs->flags);
 
-	number_of_cpusets++;
+	cpuset_inc();
 
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
@@ -1939,7 +1934,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 	if (is_sched_load_balance(cs))
 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
-	number_of_cpusets--;
+	cpuset_dec();
 	clear_bit(CS_ONLINE, &cs->flags);
 
 	mutex_unlock(&cpuset_mutex);
@@ -1992,7 +1987,6 @@ int __init cpuset_init(void)
 	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
 		BUG();
 
-	number_of_cpusets = 1;
 	return 0;
 }
 
kernel/debug/kdb/kdb_bt.c
@@ -21,7 +21,7 @@
 static void kdb_show_stack(struct task_struct *p, void *addr)
 {
 	int old_lvl = console_loglevel;
-	console_loglevel = 15;
+	console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
 	kdb_trap_printk++;
 	kdb_set_current_task(p);
 	if (addr) {
kernel/debug/kdb/kdb_io.c
@@ -710,7 +710,7 @@ kdb_printit:
 	}
 	if (logging) {
 		saved_loglevel = console_loglevel;
-		console_loglevel = 0;
+		console_loglevel = CONSOLE_LOGLEVEL_SILENT;
 		printk(KERN_INFO "%s", kdb_buffer);
 	}
 
kernel/debug/kdb/kdb_main.c
@@ -1091,7 +1091,7 @@ static int kdb_reboot(int argc, const char **argv)
 static void kdb_dumpregs(struct pt_regs *regs)
 {
 	int old_lvl = console_loglevel;
-	console_loglevel = 15;
+	console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
 	kdb_trap_printk++;
 	show_regs(regs);
 	kdb_trap_printk--;
kernel/exec_domain.c
@@ -37,7 +37,7 @@ static unsigned long ident_map[32] = {
 struct exec_domain default_exec_domain = {
 	.name		= "Linux",		/* name */
 	.handler	= default_handler,	/* lcall7 causes a seg fault. */
-	.pers_low	= 0,			/* PER_LINUX personality. */
+	.pers_low	= 0,			/* PER_LINUX personality. */
 	.pers_high	= 0,			/* PER_LINUX personality. */
 	.signal_map	= ident_map,		/* Identity map signals. */
 	.signal_invmap	= ident_map,		/*  - both ways. */
@@ -83,7 +83,7 @@ lookup_exec_domain(unsigned int personality)
 	ep = &default_exec_domain;
 out:
 	read_unlock(&exec_domains_lock);
-	return (ep);
+	return ep;
 }
 
 int
@@ -110,8 +110,9 @@ register_exec_domain(struct exec_domain *ep)
 
 out:
 	write_unlock(&exec_domains_lock);
-	return (err);
+	return err;
 }
+EXPORT_SYMBOL(register_exec_domain);
 
 int
 unregister_exec_domain(struct exec_domain *ep)
@@ -133,6 +134,7 @@ unregister:
 	write_unlock(&exec_domains_lock);
 	return 0;
 }
+EXPORT_SYMBOL(unregister_exec_domain);
 
 int __set_personality(unsigned int personality)
 {
@@ -144,6 +146,7 @@ int __set_personality(unsigned int personality)
 
 	return 0;
 }
+EXPORT_SYMBOL(__set_personality);
 
 #ifdef CONFIG_PROC_FS
 static int execdomains_proc_show(struct seq_file *m, void *v)
@@ -188,8 +191,3 @@ SYSCALL_DEFINE1(personality, unsigned int, personality)
 
 	return old;
 }
-
-
-EXPORT_SYMBOL(register_exec_domain);
-EXPORT_SYMBOL(unregister_exec_domain);
-EXPORT_SYMBOL(__set_personality);
kernel/exit.c
@@ -352,7 +352,7 @@ int disallow_signal(int sig)
 
 EXPORT_SYMBOL(disallow_signal);
 
-#ifdef CONFIG_MM_OWNER
+#ifdef CONFIG_MEMCG
 /*
  * A task is exiting. If it owned this mm, find a new owner for the mm.
  */
@@ -395,14 +395,18 @@ retry:
 	}
 
 	/*
-	 * Search through everything else. We should not get
-	 * here often
+	 * Search through everything else, we should not get here often.
 	 */
-	do_each_thread(g, c) {
-		if (c->mm == mm)
-			goto assign_new_owner;
-	} while_each_thread(g, c);
-
+	for_each_process(g) {
+		if (g->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(g, c) {
+			if (c->mm == mm)
+				goto assign_new_owner;
+			if (c->mm)
+				break;
+		}
+	}
 	read_unlock(&tasklist_lock);
 	/*
 	 * We found no owner yet mm_users > 1: this implies that we are
@@ -434,7 +438,7 @@ assign_new_owner:
 	task_unlock(c);
 	put_task_struct(c);
 }
-#endif /* CONFIG_MM_OWNER */
+#endif /* CONFIG_MEMCG */
 
 /*
  * Turn us into a lazy TLB process if we
kernel/fork.c
@@ -150,15 +150,15 @@ void __weak arch_release_thread_info(struct thread_info *ti)
 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 						  int node)
 {
-	struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
-					     THREAD_SIZE_ORDER);
+	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
+						  THREAD_SIZE_ORDER);
 
 	return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
 {
-	free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+	free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_info_cache;
@@ -1099,12 +1099,12 @@ static void rt_mutex_init_task(struct task_struct *p)
 #endif
 }
 
-#ifdef CONFIG_MM_OWNER
+#ifdef CONFIG_MEMCG
 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 {
 	mm->owner = p;
 }
-#endif /* CONFIG_MM_OWNER */
+#endif /* CONFIG_MEMCG */
 
 /*
  * Initialize POSIX timer handling for a single task.
kernel/hung_task.c
@@ -52,8 +52,10 @@ unsigned int __read_mostly sysctl_hung_task_panic =
 
 static int __init hung_task_panic_setup(char *str)
 {
-	sysctl_hung_task_panic = simple_strtoul(str, NULL, 0);
+	int rc = kstrtouint(str, 0, &sysctl_hung_task_panic);
 
+	if (rc)
+		return rc;
 	return 1;
 }
 __setup("hung_task_panic=", hung_task_panic_setup);
kernel/kthread.c
@@ -262,7 +262,7 @@ static void create_kthread(struct kthread_create_info *create)
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
- * Returns a task_struct or ERR_PTR(-ENOMEM).
+ * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					    void *data, int node,
@@ -298,7 +298,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 	 * that thread.
 	 */
 	if (xchg(&create->done, NULL))
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(-EINTR);
 	/*
 	 * kthreadd (or new kernel thread) will call complete()
 	 * shortly.
kernel/latencytop.c
@@ -88,7 +88,8 @@ static void clear_global_latency_tracing(void)
 }
 
 static void __sched
-account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat)
+account_global_scheduler_latency(struct task_struct *tsk,
+				 struct latency_record *lat)
 {
 	int firstnonnull = MAXLR + 1;
 	int i;
@@ -255,7 +256,7 @@ static int lstats_show(struct seq_file *m, void *v)
 					break;
 				seq_printf(m, " %ps", (void *)bt);
 			}
-			seq_printf(m, "\n");
+			seq_puts(m, "\n");
 		}
 	}
 	return 0;
kernel/printk/printk.c
@@ -54,20 +54,16 @@
 #include "console_cmdline.h"
 #include "braille.h"
 
-/* printk's without a loglevel use this.. */
-#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
-
-/* We show everything that is MORE important than this.. */
-#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
-#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
-
 int console_printk[4] = {
-	DEFAULT_CONSOLE_LOGLEVEL,	/* console_loglevel */
+	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
 	DEFAULT_MESSAGE_LOGLEVEL,	/* default_message_loglevel */
-	MINIMUM_CONSOLE_LOGLEVEL,	/* minimum_console_loglevel */
-	DEFAULT_CONSOLE_LOGLEVEL,	/* default_console_loglevel */
+	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
+	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
 };
 
+/* Deferred messaged from sched code are marked by this special level */
+#define SCHED_MESSAGE_LOGLEVEL -2
+
 /*
  * Low level drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
@@ -90,6 +86,29 @@ static struct lockdep_map console_lock_dep_map = {
 };
 #endif
 
+/*
+ * Helper macros to handle lockdep when locking/unlocking console_sem. We use
+ * macros instead of functions so that _RET_IP_ contains useful information.
+ */
+#define down_console_sem() do { \
+	down(&console_sem);\
+	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
+} while (0)
+
+static int __down_trylock_console_sem(unsigned long ip)
+{
+	if (down_trylock(&console_sem))
+		return 1;
+	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
+	return 0;
+}
+#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
+
+#define up_console_sem() do { \
+	mutex_release(&console_lock_dep_map, 1, _RET_IP_);\
+	up(&console_sem);\
+} while (0)
+
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -206,8 +225,9 @@ struct printk_log {
 };
 
 /*
- * The logbuf_lock protects kmsg buffer, indices, counters. It is also
- * used in interesting ways to provide interlocking in console_unlock();
+ * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
+ * within the scheduler's rq lock. It must be released before calling
+ * console_unlock() or anything else that might wake up a process.
 */
 static DEFINE_RAW_SPINLOCK(logbuf_lock);
 
@@ -250,9 +270,6 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
 static char *log_buf = __log_buf;
 static u32 log_buf_len = __LOG_BUF_LEN;
 
-/* cpu currently holding logbuf_lock */
-static volatile unsigned int logbuf_cpu = UINT_MAX;
-
 /* human readable text of the record */
 static char *log_text(const struct printk_log *msg)
 {
@@ -297,34 +314,106 @@ static u32 log_next(u32 idx)
 	return idx + msg->len;
 }
 
+/*
+ * Check whether there is enough free space for the given message.
+ *
+ * The same values of first_idx and next_idx mean that the buffer
+ * is either empty or full.
+ *
+ * If the buffer is empty, we must respect the position of the indexes.
+ * They cannot be reset to the beginning of the buffer.
+ */
+static int logbuf_has_space(u32 msg_size, bool empty)
+{
+	u32 free;
+
+	if (log_next_idx > log_first_idx || empty)
+		free = max(log_buf_len - log_next_idx, log_first_idx);
+	else
+		free = log_first_idx - log_next_idx;
+
+	/*
+	 * We need space also for an empty header that signalizes wrapping
+	 * of the buffer.
+	 */
+	return free >= msg_size + sizeof(struct printk_log);
+}
+
+static int log_make_free_space(u32 msg_size)
+{
+	while (log_first_seq < log_next_seq) {
+		if (logbuf_has_space(msg_size, false))
+			return 0;
+		/* drop old messages until we have enough continuous space */
+		log_first_idx = log_next(log_first_idx);
+		log_first_seq++;
+	}
+
+	/* sequence numbers are equal, so the log buffer is empty */
+	if (logbuf_has_space(msg_size, true))
+		return 0;
+
+	return -ENOMEM;
+}
+
+/* compute the message size including the padding bytes */
+static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
+{
+	u32 size;
+
+	size = sizeof(struct printk_log) + text_len + dict_len;
+	*pad_len = (-size) & (LOG_ALIGN - 1);
+	size += *pad_len;
+
+	return size;
+}
+
+/*
+ * Define how much of the log buffer we could take at maximum. The value
+ * must be greater than two. Note that only half of the buffer is available
+ * when the index points to the middle.
+ */
+#define MAX_LOG_TAKE_PART 4
+static const char trunc_msg[] = "<truncated>";
+
+static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
+			u16 *dict_len, u32 *pad_len)
+{
+	/*
+	 * The message should not take the whole buffer. Otherwise, it might
+	 * get removed too soon.
+	 */
+	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
+	if (*text_len > max_text_len)
+		*text_len = max_text_len;
+	/* enable the warning message */
+	*trunc_msg_len = strlen(trunc_msg);
+	/* disable the "dict" completely */
+	*dict_len = 0;
+	/* compute the size again, count also the warning message */
+	return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
+}
+
 /* insert record into the buffer, discard old ones, update heads */
-static void log_store(int facility, int level,
-		      enum log_flags flags, u64 ts_nsec,
-		      const char *dict, u16 dict_len,
-		      const char *text, u16 text_len)
+static int log_store(int facility, int level,
+		     enum log_flags flags, u64 ts_nsec,
+		     const char *dict, u16 dict_len,
+		     const char *text, u16 text_len)
 {
 	struct printk_log *msg;
 	u32 size, pad_len;
+	u16 trunc_msg_len = 0;
 
 	/* number of '\0' padding bytes to next message */
-	size = sizeof(struct printk_log) + text_len + dict_len;
-	pad_len = (-size) & (LOG_ALIGN - 1);
-	size += pad_len;
+	size = msg_used_size(text_len, dict_len, &pad_len);
 
-	while (log_first_seq < log_next_seq) {
-		u32 free;
-
-		if (log_next_idx > log_first_idx)
-			free = max(log_buf_len - log_next_idx, log_first_idx);
-		else
-			free = log_first_idx - log_next_idx;
-
-		if (free >= size + sizeof(struct printk_log))
-			break;
-
-		/* drop old messages until we have enough contiuous space */
-		log_first_idx = log_next(log_first_idx);
-		log_first_seq++;
+	if (log_make_free_space(size)) {
+		/* truncate the message if it is too long for empty buffer */
+		size = truncate_msg(&text_len, &trunc_msg_len,
+				    &dict_len, &pad_len);
+		/* survive when the log buffer is too small for trunc_msg */
+		if (log_make_free_space(size))
+			return 0;
 	}
 
 	if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
@@ -341,6 +430,10 @@ static void log_store(int facility, int level,
 	msg = (struct printk_log *)(log_buf + log_next_idx);
 	memcpy(log_text(msg), text, text_len);
 	msg->text_len = text_len;
+	if (trunc_msg_len) {
+		memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
+		msg->text_len += trunc_msg_len;
+	}
 	memcpy(log_dict(msg), dict, dict_len);
 	msg->dict_len = dict_len;
 	msg->facility = facility;
@@ -356,6 +449,8 @@ static void log_store(int facility, int level,
 	/* insert message */
 	log_next_idx += msg->len;
 	log_next_seq++;
+
+	return msg->text_len;
 }
 
 #ifdef CONFIG_SECURITY_DMESG_RESTRICT
@@ -1303,7 +1398,10 @@ static void zap_locks(void)
 	sema_init(&console_sem, 1);
 }
 
-/* Check if we have any console registered that can be called early in boot. */
+/*
+ * Check if we have any console that is capable of printing while cpu is
+ * booting or shutting down. Requires console_sem.
+ */
 static int have_callable_console(void)
 {
 	struct console *con;
@@ -1318,10 +1416,9 @@
 /*
 * Can we actually use the console at this time on this cpu?
 *
- * Console drivers may assume that per-cpu resources have
- * been allocated. So unless they're explicitly marked as
- * being able to cope (CON_ANYTIME) don't call them until
- * this CPU is officially up.
+ * Console drivers may assume that per-cpu resources have been allocated. So
+ * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+ * call them until this CPU is officially up.
 */
 static inline int can_use_console(unsigned int cpu)
 {
@@ -1333,36 +1430,24 @@ static inline int can_use_console(unsigned int cpu)
 * messages from a 'printk'. Return true (and with the
 * console_lock held, and 'console_locked' set) if it
 * is successful, false otherwise.
- *
- * This gets called with the 'logbuf_lock' spinlock held and
- * interrupts disabled. It should return with 'lockbuf_lock'
- * released but interrupts still disabled.
 */
-static int console_trylock_for_printk(unsigned int cpu)
-	__releases(&logbuf_lock)
+static int console_trylock_for_printk(void)
 {
-	int retval = 0, wake = 0;
+	unsigned int cpu = smp_processor_id();
 
-	if (console_trylock()) {
-		retval = 1;
-
-		/*
-		 * If we can't use the console, we need to release
-		 * the console semaphore by hand to avoid flushing
-		 * the buffer. We need to hold the console semaphore
-		 * in order to do this test safely.
-		 */
-		if (!can_use_console(cpu)) {
-			console_locked = 0;
-			wake = 1;
-			retval = 0;
-		}
-	}
+	if (!console_trylock())
+		return 0;
+	/*
+	 * If we can't use the console, we need to release the console
+	 * semaphore by hand to avoid flushing the buffer. We need to hold the
+	 * console semaphore in order to do this test safely.
	 */
+	if (!can_use_console(cpu)) {
+		console_locked = 0;
+		up_console_sem();
+		return 0;
+	}
-	logbuf_cpu = UINT_MAX;
-	raw_spin_unlock(&logbuf_lock);
-	if (wake)
-		up(&console_sem);
-	return retval;
+	return 1;
 }
 
 int printk_delay_msec __read_mostly;
@@ -1490,11 +1575,19 @@ asmlinkage int vprintk_emit(int facility, int level,
 	static int recursion_bug;
 	static char textbuf[LOG_LINE_MAX];
 	char *text = textbuf;
-	size_t text_len;
+	size_t text_len = 0;
 	enum log_flags lflags = 0;
 	unsigned long flags;
 	int this_cpu;
 	int printed_len = 0;
+	bool in_sched = false;
+	/* cpu currently holding logbuf_lock in this function */
+	static volatile unsigned int logbuf_cpu = UINT_MAX;
+
+	if (level == SCHED_MESSAGE_LOGLEVEL) {
+		level = -1;
+		in_sched = true;
+	}
 
 	boot_delay_msec(level);
 	printk_delay();
@@ -1516,7 +1609,8 @@ asmlinkage int vprintk_emit(int facility, int level,
 	 */
 	if (!oops_in_progress && !lockdep_recursing(current)) {
 		recursion_bug = 1;
-		goto out_restore_irqs;
+		local_irq_restore(flags);
+		return 0;
 	}
 	zap_locks();
 }
@@ -1530,17 +1624,22 @@ asmlinkage int vprintk_emit(int facility, int level,
 			"BUG: recent printk recursion!";
 
 		recursion_bug = 0;
-		printed_len += strlen(recursion_msg);
+		text_len = strlen(recursion_msg);
 		/* emit KERN_CRIT message */
-		log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-			  NULL, 0, recursion_msg, printed_len);
+		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
+					 NULL, 0, recursion_msg, text_len);
 	}
 
 	/*
 	 * The printf needs to come first; we need the syslog
 	 * prefix which might be passed-in as a parameter.
 	 */
-	text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
+	if (in_sched)
+		text_len = scnprintf(text, sizeof(textbuf),
+				     KERN_WARNING "[sched_delayed] ");
+
+	text_len += vscnprintf(text + text_len,
+			       sizeof(textbuf) - text_len, fmt, args);
 
 	/* mark and strip a trailing newline */
 	if (text_len && text[text_len-1] == '\n') {
@@ -1586,9 +1685,12 @@ asmlinkage int vprintk_emit(int facility, int level,
 			cont_flush(LOG_NEWLINE);
 
 		/* buffer line if possible, otherwise store it right away */
-		if (!cont_add(facility, level, text, text_len))
-			log_store(facility, level, lflags | LOG_CONT, 0,
-				  dict, dictlen, text, text_len);
+		if (cont_add(facility, level, text, text_len))
+			printed_len += text_len;
+		else
+			printed_len += log_store(facility, level,
+						 lflags | LOG_CONT, 0,
+						 dict, dictlen, text, text_len);
 	} else {
 		bool stored = false;
 
@@ -1607,26 +1709,35 @@ asmlinkage int vprintk_emit(int facility, int level,
 			cont_flush(LOG_NEWLINE);
 		}
 
-		if (!stored)
-			log_store(facility, level, lflags, 0,
-				  dict, dictlen, text, text_len);
+		if (stored)
+			printed_len += text_len;
+		else
+			printed_len += log_store(facility, level, lflags, 0,
+						 dict, dictlen, text, text_len);
 	}
-	printed_len += text_len;
 
+	logbuf_cpu = UINT_MAX;
 	raw_spin_unlock(&logbuf_lock);
+	lockdep_on();
+	local_irq_restore(flags);
+
+	/* If called from the scheduler, we can not call up(). */
+	if (in_sched)
+		return printed_len;
+
+	/*
+	 * Disable preemption to avoid being preempted while holding
+	 * console_sem which would prevent anyone from printing to console
+	 */
+	preempt_disable();
 	/*
 	 * Try to acquire and then immediately release the console semaphore.
 	 * The release will print out buffers and wake up /dev/kmsg and syslog()
 	 * users.
-	 *
-	 * The console_trylock_for_printk() function will release 'logbuf_lock'
-	 * regardless of whether it actually gets the console semaphore or not.
 	 */
-	if (console_trylock_for_printk(this_cpu))
+	if (console_trylock_for_printk())
 		console_unlock();
-
-	lockdep_on();
-out_restore_irqs:
-	local_irq_restore(flags);
+	preempt_enable();
 
 	return printed_len;
 }
@@ -1882,16 +1993,14 @@ void suspend_console(void)
 	printk("Suspending console(s) (use no_console_suspend to debug)\n");
 	console_lock();
 	console_suspended = 1;
-	up(&console_sem);
-	mutex_release(&console_lock_dep_map, 1, _RET_IP_);
+	up_console_sem();
 }
 
 void resume_console(void)
 {
 	if (!console_suspend_enabled)
 		return;
-	down(&console_sem);
-	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
+	down_console_sem();
 	console_suspended = 0;
 	console_unlock();
 }
@@ -1933,12 +2042,11 @@ void console_lock(void)
 {
 	might_sleep();
 
-	down(&console_sem);
+	down_console_sem();
 	if (console_suspended)
 		return;
 	console_locked = 1;
 	console_may_schedule = 1;
-	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1952,15 +2060,14 @@ EXPORT_SYMBOL(console_lock);
 */
 int console_trylock(void)
 {
-	if (down_trylock(&console_sem))
+	if (down_trylock_console_sem())
 		return 0;
 	if (console_suspended) {
-		up(&console_sem);
+		up_console_sem();
 		return 0;
 	}
 	console_locked = 1;
 	console_may_schedule = 0;
-	mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
 	return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2022,7 +2129,7 @@ void console_unlock(void)
 	bool retry;
 
 	if (console_suspended) {
-		up(&console_sem);
+		up_console_sem();
 		return;
 	}
 
@@ -2043,10 +2150,15 @@ again:
 	}
 
 	if (console_seq < log_first_seq) {
+		len = sprintf(text, "** %u printk messages dropped ** ",
+			      (unsigned)(log_first_seq - console_seq));
+
 		/* messages are gone, move to first one */
 		console_seq = log_first_seq;
 		console_idx = log_first_idx;
 		console_prev = 0;
+	} else {
+		len = 0;
 	}
 skip:
 	if (console_seq == log_next_seq)
@@ -2071,8 +2183,8 @@ skip:
 		}
 
 		level = msg->level;
-		len = msg_print_text(msg, console_prev, false,
-				     text, sizeof(text));
+		len += msg_print_text(msg, console_prev, false,
+				      text + len, sizeof(text) - len);
 		console_idx = log_next(console_idx);
 		console_seq++;
 		console_prev = msg->flags;
@@ -2084,7 +2196,6 @@ skip:
 		local_irq_restore(flags);
 	}
 	console_locked = 0;
-	mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
 	/* Release the exclusive_console once it is used */
 	if (unlikely(exclusive_console))
@@ -2092,7 +2203,7 @@ skip:
 
 	raw_spin_unlock(&logbuf_lock);
 
-	up(&console_sem);
+	up_console_sem();
 
 	/*
	 * Someone could have filled up the buffer again, so re-check if there's
@@ -2137,7 +2248,7 @@ void console_unblank(void)
	 * oops_in_progress is set to 1..
	 */
 	if (oops_in_progress) {
-		if (down_trylock(&console_sem) != 0)
+		if (down_trylock_console_sem() != 0)
 			return;
 	} else
 		console_lock();
@@ -2438,21 +2549,19 @@ late_initcall(printk_late_init);
 /*
 * Delayed printk version, for scheduler-internal messages:
 */
-#define PRINTK_BUF_SIZE		512
-
 #define PRINTK_PENDING_WAKEUP	0x01
-#define PRINTK_PENDING_SCHED	0x02
+#define PRINTK_PENDING_OUTPUT	0x02
 
 static DEFINE_PER_CPU(int, printk_pending);
-static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
 
 static void wake_up_klogd_work_func(struct irq_work *irq_work)
 {
 	int pending = __this_cpu_xchg(printk_pending, 0);
 
-	if (pending & PRINTK_PENDING_SCHED) {
-		char *buf = __get_cpu_var(printk_sched_buf);
-		pr_warn("[sched_delayed] %s", buf);
+	if (pending & PRINTK_PENDING_OUTPUT) {
+		/* If trylock fails, someone else is doing the printing */
+		if (console_trylock())
+			console_unlock();
 	}
 
 	if (pending & PRINTK_PENDING_WAKEUP)
@@ -2474,23 +2583,19 @@ void wake_up_klogd(void)
 	preempt_enable();
 }
 
-int printk_sched(const char *fmt, ...)
+int printk_deferred(const char *fmt, ...)
 {
-	unsigned long flags;
 	va_list args;
-	char *buf;
 	int r;
 
-	local_irq_save(flags);
-	buf = __get_cpu_var(printk_sched_buf);
-
+	preempt_disable();
 	va_start(args, fmt);
-	r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+	r = vprintk_emit(0, SCHED_MESSAGE_LOGLEVEL, NULL, 0, fmt, args);
 	va_end(args);
 
-	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
 	irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
-	local_irq_restore(flags);
+	preempt_enable();
 
 	return r;
 }
kernel/reboot.c
@@ -388,15 +388,22 @@ static int __init reboot_setup(char *str)
 			break;
 
 		case 's':
-			if (isdigit(*(str+1)))
-				reboot_cpu = simple_strtoul(str+1, NULL, 0);
-			else if (str[1] == 'm' && str[2] == 'p' &&
-							isdigit(*(str+3)))
-				reboot_cpu = simple_strtoul(str+3, NULL, 0);
-			else
+		{
+			int rc;
+
+			if (isdigit(*(str+1))) {
+				rc = kstrtoint(str+1, 0, &reboot_cpu);
+				if (rc)
+					return rc;
+			} else if (str[1] == 'm' && str[2] == 'p' &&
+				   isdigit(*(str+3))) {
+				rc = kstrtoint(str+3, 0, &reboot_cpu);
+				if (rc)
+					return rc;
+			} else
 				reboot_mode = REBOOT_SOFT;
 			break;
-
+		}
 		case 'g':
 			reboot_mode = REBOOT_GPIO;
 			break;
kernel/res_counter.c
@@ -186,8 +186,11 @@ int res_counter_memparse_write_strategy(const char *buf,
 
 	/* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
 	if (*buf == '-') {
-		res = simple_strtoull(buf + 1, &end, 10);
-		if (res != 1 || *end != '\0')
+		int rc = kstrtoull(buf + 1, 10, &res);
+
+		if (rc)
+			return rc;
+		if (res != 1)
 			return -EINVAL;
 		*resp = RES_COUNTER_MAX;
 		return 0;
kernel/sched/core.c
@@ -1367,7 +1367,7 @@ out:
	 * leave kernel.
	 */
 	if (p->mm && printk_ratelimit()) {
-		printk_sched("process %d (%s) no longer affine to cpu%d\n",
+		printk_deferred("process %d (%s) no longer affine to cpu%d\n",
 				task_pid_nr(p), p->comm, cpu);
 	}
 }
kernel/sched/deadline.c
@@ -348,12 +348,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
	 * entity.
	 */
 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-		static bool lag_once = false;
-
-		if (!lag_once) {
-			lag_once = true;
-			printk_sched("sched: DL replenish lagged to much\n");
-		}
+		printk_deferred_once("sched: DL replenish lagged to much\n");
 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 		dl_se->runtime = pi_se->dl_runtime;
 	}
kernel/sched/rt.c
@@ -890,14 +890,8 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
		 * but accrue some time due to boosting.
		 */
 		if (likely(rt_b->rt_runtime)) {
-			static bool once = false;
-
 			rt_rq->rt_throttled = 1;
-
-			if (!once) {
-				once = true;
-				printk_sched("sched: RT throttling activated\n");
-			}
+			printk_deferred_once("sched: RT throttling activated\n");
 		} else {
			/*
			 * In case we did anyway, make it go away,
kernel/signal.c
@@ -3496,7 +3496,7 @@ COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
 }
 #endif
 
-#ifdef __ARCH_WANT_SYS_SGETMASK
+#ifdef CONFIG_SGETMASK_SYSCALL
 
 /*
  * For backwards compatibility. Functionality superseded by sigprocmask.
@@ -3517,7 +3517,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
 
 	return old;
 }
-#endif /* __ARCH_WANT_SGETMASK */
+#endif /* CONFIG_SGETMASK_SYSCALL */
 
 #ifdef __ARCH_WANT_SYS_SIGNAL
 /*
kernel/stop_machine.c
@@ -307,6 +307,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion. The
 * caller is responsible for ensuring @work_buf is currently unused
kernel/sys_ni.c
@@ -135,6 +135,8 @@ cond_syscall(sys_setresgid16);
 cond_syscall(sys_setresuid16);
 cond_syscall(sys_setreuid16);
 cond_syscall(sys_setuid16);
+cond_syscall(sys_sgetmask);
+cond_syscall(sys_ssetmask);
 cond_syscall(sys_vm86old);
 cond_syscall(sys_vm86);
 cond_syscall(sys_ipc);
kernel/time/ntp.c
@@ -786,8 +786,9 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
 		time_status |= STA_PPSERROR;
 		pps_errcnt++;
 		pps_dec_freq_interval();
-		pr_err("hardpps: PPSERROR: interval too long - %ld s\n",
-			freq_norm.sec);
+		printk_deferred(KERN_ERR
+			"hardpps: PPSERROR: interval too long - %ld s\n",
+			freq_norm.sec);
 		return 0;
 	}
 
@@ -800,7 +801,8 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
 	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
 	pps_freq = ftemp;
 	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
-		pr_warning("hardpps: PPSWANDER: change=%ld\n", delta);
+		printk_deferred(KERN_WARNING
+				"hardpps: PPSWANDER: change=%ld\n", delta);
 		time_status |= STA_PPSWANDER;
 		pps_stbcnt++;
 		pps_dec_freq_interval();
@@ -844,8 +846,9 @@ static void hardpps_update_phase(long error)
	 * the time offset is updated.
	 */
 	if (jitter > (pps_jitter << PPS_POPCORN)) {
-		pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
-			   jitter, (pps_jitter << PPS_POPCORN));
+		printk_deferred(KERN_WARNING
+			"hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
+			jitter, (pps_jitter << PPS_POPCORN));
 		time_status |= STA_PPSJITTER;
 		pps_jitcnt++;
 	} else if (time_status & STA_PPSTIME) {
@@ -902,7 +905,7 @@ void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 		time_status |= STA_PPSJITTER;
 		/* restart the frequency calibration interval */
 		pps_fbase = *raw_ts;
-		pr_err("hardpps: PPSJITTER: bad pulse\n");
+		printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
 		return;
 	}
 
kernel/time/timekeeping.c
@@ -852,8 +852,9 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 					   struct timespec *delta)
 {
 	if (!timespec_valid_strict(delta)) {
-		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
-					"sleep delta value!\n");
+		printk_deferred(KERN_WARNING
+				"__timekeeping_inject_sleeptime: Invalid "
+				"sleep delta value!\n");
 		return;
 	}
 	tk_xtime_add(tk, delta);
@@ -1157,7 +1158,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 
 	if (unlikely(tk->clock->maxadj &&
 		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
-		printk_once(KERN_WARNING
+		printk_deferred_once(KERN_WARNING
 			"Adjusting %s more than 11%% (%ld vs %ld)\n",
 			tk->clock->name, (long)tk->mult + adj,
 			(long)tk->clock->mult + tk->clock->maxadj);
kernel/tracepoint.c
@@ -239,6 +239,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
+ * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
@@ -264,6 +265,7 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
+ * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
kernel/user.c
@@ -87,7 +87,6 @@ static DEFINE_SPINLOCK(uidhash_lock);
 struct user_struct root_user = {
 	.__count	= ATOMIC_INIT(1),
 	.processes	= ATOMIC_INIT(1),
-	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm     = 0,
 	.uid		= GLOBAL_ROOT_UID,
kernel/utsname_sysctl.c
@@ -51,7 +51,7 @@ static int proc_do_uts_string(ctl_table *table, int write,
 	int r;
 	memcpy(&uts_table, table, sizeof(uts_table));
 	uts_table.data = get_uts(table, write);
-	r = proc_dostring(&uts_table,write,buffer,lenp, ppos);
+	r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
 	put_uts(table, write, uts_table.data);
 
 	if (write)
@@ -135,4 +135,4 @@ static int __init utsname_sysctl_init(void)
 	return 0;
 }
 
-__initcall(utsname_sysctl_init);
+device_initcall(utsname_sysctl_init);