Merge branch 'linus' into tracing-v28-for-linus-v3
Conflicts:
	init/main.c
	kernel/module.c
	scripts/bootgraph.pl
@@ -2735,6 +2735,8 @@ void cgroup_fork_callbacks(struct task_struct *child)
* Called on every change to mm->owner. mm_init_owner() does not
* invoke this routine, since it assigns the mm->owner the first time
* and does not change it.
*
* The callbacks are invoked with mmap_sem held in read mode.
*/
void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
{
@@ -2750,7 +2752,7 @@ void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
if (oldcgrp == newcgrp)
continue;
if (ss->mm_owner_changed)
ss->mm_owner_changed(ss, oldcgrp, newcgrp);
ss->mm_owner_changed(ss, oldcgrp, newcgrp, new);
}
}
}

@@ -26,6 +26,64 @@

#include <asm/uaccess.h>

/*
* Note that the native side is already converted to a timespec, because
* that's what we want anyway.
*/
static int compat_get_timeval(struct timespec *o,
struct compat_timeval __user *i)
{
long usec;

if (get_user(o->tv_sec, &i->tv_sec) ||
get_user(usec, &i->tv_usec))
return -EFAULT;
o->tv_nsec = usec * 1000;
return 0;
}

static int compat_put_timeval(struct compat_timeval __user *o,
struct timeval *i)
{
return (put_user(i->tv_sec, &o->tv_sec) ||
put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
}

asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
struct timezone __user *tz)
{
if (tv) {
struct timeval ktv;
do_gettimeofday(&ktv);
if (compat_put_timeval(tv, &ktv))
return -EFAULT;
}
if (tz) {
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}

return 0;
}

asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
struct timezone __user *tz)
{
struct timespec kts;
struct timezone ktz;

if (tv) {
if (compat_get_timeval(&kts, tv))
return -EFAULT;
}
if (tz) {
if (copy_from_user(&ktz, tz, sizeof(ktz)))
return -EFAULT;
}

return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
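For reference, the only arithmetic in the helpers above is the microsecond-to-nanosecond fold. A minimal standalone C sketch of the same conversion, using hypothetical struct names rather than the kernel's compat types:

	/* Illustrative only: the usec -> nsec folding done by
	 * compat_get_timeval() above. Struct names are hypothetical. */
	#include <stdio.h>

	struct my_timeval  { long tv_sec; long tv_usec; };  /* userspace hands in */
	struct my_timespec { long tv_sec; long tv_nsec; };  /* kernel keeps       */

	static struct my_timespec to_timespec(struct my_timeval tv)
	{
		struct my_timespec ts;
		ts.tv_sec  = tv.tv_sec;
		ts.tv_nsec = tv.tv_usec * 1000;   /* 1 usec == 1000 nsec */
		return ts;
	}

	int main(void)
	{
		struct my_timeval tv = { 5, 250 };
		struct my_timespec ts = to_timespec(tv);
		printf("%ld s, %ld ns\n", ts.tv_sec, ts.tv_nsec); /* 5 s, 250000 ns */
		return 0;
	}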

int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
{
return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||

@@ -1,4 +1,4 @@
/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
/*
* linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
*
* Written by Hennus Bergman, 1992.

@@ -644,24 +644,23 @@ retry:
assign_new_owner:
BUG_ON(c == p);
get_task_struct(c);
read_unlock(&tasklist_lock);
down_write(&mm->mmap_sem);
/*
* The task_lock protects c->mm from changing.
* We always want mm->owner->mm == mm
*/
task_lock(c);
/*
* Delay read_unlock() till we have the task_lock()
* to ensure that c does not slip away underneath us
*/
read_unlock(&tasklist_lock);
if (c->mm != mm) {
task_unlock(c);
up_write(&mm->mmap_sem);
put_task_struct(c);
goto retry;
}
cgroup_mm_owner_callbacks(mm->owner, c);
mm->owner = c;
task_unlock(c);
up_write(&mm->mmap_sem);
put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

@@ -260,7 +260,6 @@ const char *kallsyms_lookup(unsigned long addr,
/* see if it's in a module */
return module_address_lookup(addr, symbolsize, offset, modname,
namebuf);
return NULL;
}

int lookup_symbol_name(unsigned long addr, char *symname)

@@ -113,7 +113,7 @@ int request_module(const char *fmt, ...)
return ret;
}
EXPORT_SYMBOL(request_module);
#endif /* CONFIG_KMOD */
#endif /* CONFIG_MODULES */

struct subprocess_info {
struct work_struct work;
@@ -265,7 +265,7 @@ static void __call_usermodehelper(struct work_struct *work)
}
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
/*
* If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
* (used for preventing user land processes from being created after the user
@@ -288,39 +288,37 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)

static int usermodehelper_pm_callback(struct notifier_block *nfb,
unsigned long action,
void *ignored)
/**
* usermodehelper_disable - prevent new helpers from being started
*/
int usermodehelper_disable(void)
{
long retval;

switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
usermodehelper_disabled = 1;
smp_mb();
/*
* From now on call_usermodehelper_exec() won't start any new
* helpers, so it is sufficient if running_helpers turns out to
* be zero at one point (it may be increased later, but that
* doesn't matter).
*/
retval = wait_event_timeout(running_helpers_waitq,
usermodehelper_disabled = 1;
smp_mb();
/*
* From now on call_usermodehelper_exec() won't start any new
* helpers, so it is sufficient if running_helpers turns out to
* be zero at one point (it may be increased later, but that
* doesn't matter).
*/
retval = wait_event_timeout(running_helpers_waitq,
atomic_read(&running_helpers) == 0,
RUNNING_HELPERS_TIMEOUT);
if (retval) {
return NOTIFY_OK;
} else {
usermodehelper_disabled = 0;
return NOTIFY_BAD;
}
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
usermodehelper_disabled = 0;
return NOTIFY_OK;
}
if (retval)
return 0;

return NOTIFY_DONE;
usermodehelper_disabled = 0;
return -EAGAIN;
}

/**
* usermodehelper_enable - allow new helpers to be started again
*/
void usermodehelper_enable(void)
{
usermodehelper_disabled = 0;
}

static void helper_lock(void)
@@ -334,18 +332,12 @@ static void helper_unlock(void)
if (atomic_dec_and_test(&running_helpers))
wake_up(&running_helpers_waitq);
}

static void register_pm_notifier_callback(void)
{
pm_notifier(usermodehelper_pm_callback, 0);
}
#else /* CONFIG_PM */
#else /* CONFIG_PM_SLEEP */
#define usermodehelper_disabled 0

static inline void helper_lock(void) {}
static inline void helper_unlock(void) {}
static inline void register_pm_notifier_callback(void) {}
#endif /* CONFIG_PM */
#endif /* CONFIG_PM_SLEEP */

/**
* call_usermodehelper_setup - prepare to call a usermode helper
@@ -515,5 +507,4 @@ void __init usermodehelper_init(void)
{
khelper_wq = create_singlethread_workqueue("khelper");
BUG_ON(!khelper_wq);
register_pm_notifier_callback();
}

@@ -404,7 +404,7 @@ void kretprobe_hash_lock(struct task_struct *tsk,
spin_lock_irqsave(hlist_lock, *flags);
}

void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
{
spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
spin_lock_irqsave(hlist_lock, *flags);

@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/sched.h>

#define KERNEL_ATTR_RO(_name) \
@@ -53,6 +54,37 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
KERNEL_ATTR_RW(uevent_helper);
#endif

#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;

if (prof_on)
return -EEXIST;
/*
* This eventually calls into get_option() which
* has a ton of callers and is not const. It is
* easiest to cast it away here.
*/
profile_setup((char *)buf);
ret = profile_init();
if (ret)
return ret;
ret = create_proc_profile();
if (ret)
return ret;
return count;
}
KERNEL_ATTR_RW(profiling);
#endif

#ifdef CONFIG_KEXEC
static ssize_t kexec_loaded_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -109,6 +141,9 @@ static struct attribute * kernel_attrs[] = {
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,

@@ -102,7 +102,7 @@ static inline int strong_try_module_get(struct module *mod)
static inline void add_taint_module(struct module *mod, unsigned flag)
{
add_taint(flag);
mod->taints |= flag;
mod->taints |= (1U << flag);
}
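The subtle point in this hunk is that the TAINT_* constants change meaning from mask values to bit numbers, so callers must shift before OR-ing. A standalone sketch of the new convention, with hypothetical stand-in names:

	/* Illustrative only: taint flags as bit numbers (0, 1, ...) rather
	 * than mask values (0x1, 0x2, ...). Enum values are hypothetical. */
	#include <assert.h>

	enum { MY_TAINT_PROPRIETARY = 0, MY_TAINT_FORCED = 1 };  /* bit numbers */

	static unsigned int taints;

	static void my_add_taint(unsigned flag)
	{
		taints |= (1U << flag);   /* shift, since flag is an index */
	}

	int main(void)
	{
		my_add_taint(MY_TAINT_FORCED);
		assert(taints & (1U << MY_TAINT_FORCED));
		assert(!(taints & (1U << MY_TAINT_PROPRIETARY)));
		return 0;
	}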

/*
@@ -786,6 +786,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
mutex_lock(&module_mutex);
/* Store the name of the last unloaded module for diagnostic purposes */
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
unregister_dynamic_debug_module(mod->name);
free_module(mod);

out:
@@ -925,7 +926,7 @@ static const char vermagic[] = VERMAGIC_STRING;
static int try_to_force_load(struct module *mod, const char *symname)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
if (!(tainted & TAINT_FORCED_MODULE))
if (!test_taint(TAINT_FORCED_MODULE))
printk("%s: no version for \"%s\" found: kernel tainted.\n",
mod->name, symname);
add_taint_module(mod, TAINT_FORCED_MODULE);
@@ -1035,7 +1036,7 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
const unsigned long *crc;

ret = find_symbol(name, &owner, &crc,
!(mod->taints & TAINT_PROPRIETARY_MODULE), true);
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
if (!IS_ERR_VALUE(ret)) {
/* use_module can fail due to OOM,
or module initialization or unloading */
@@ -1175,7 +1176,7 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
while (i-- > 0)
sysfs_remove_bin_file(notes_attrs->dir,
&notes_attrs->attrs[i]);
kobject_del(notes_attrs->dir);
kobject_put(notes_attrs->dir);
}
kfree(notes_attrs);
}
@@ -1639,7 +1640,7 @@ static void set_license(struct module *mod, const char *license)
license = "unspecified";

if (!license_is_gpl_compatible(license)) {
if (!(tainted & TAINT_PROPRIETARY_MODULE))
if (!test_taint(TAINT_PROPRIETARY_MODULE))
printk(KERN_WARNING "%s: module license '%s' taints "
"kernel.\n", mod->name, license);
add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
@@ -1788,6 +1789,33 @@ static inline void add_kallsyms(struct module *mod,
}
#endif /* CONFIG_KALLSYMS */

#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
static void dynamic_printk_setup(Elf_Shdr *sechdrs, unsigned int verboseindex)
{
struct mod_debug *debug_info;
unsigned long pos, end;
unsigned int num_verbose;

pos = sechdrs[verboseindex].sh_addr;
num_verbose = sechdrs[verboseindex].sh_size /
sizeof(struct mod_debug);
end = pos + (num_verbose * sizeof(struct mod_debug));

for (; pos < end; pos += sizeof(struct mod_debug)) {
debug_info = (struct mod_debug *)pos;
register_dynamic_debug_module(debug_info->modname,
debug_info->type, debug_info->logical_modname,
debug_info->flag_names, debug_info->hash,
debug_info->hash2);
}
}
#else
static inline void dynamic_printk_setup(Elf_Shdr *sechdrs,
unsigned int verboseindex)
{
}
#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */

static void *module_alloc_update_bounds(unsigned long size)
{
void *ret = module_alloc(size);
@@ -1811,6 +1839,7 @@ static noinline struct module *load_module(void __user *umod,
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
char *secstrings, *args, *modmagic, *strtab = NULL;
char *staging;
unsigned int i;
unsigned int symindex = 0;
unsigned int strindex = 0;
@@ -1836,6 +1865,7 @@ static noinline struct module *load_module(void __user *umod,
#endif
unsigned int markersindex;
unsigned int markersstringsindex;
unsigned int verboseindex;
unsigned int tracepointsindex;
unsigned int tracepointsstringsindex;
unsigned int mcountindex;
@@ -1969,6 +1999,14 @@ static noinline struct module *load_module(void __user *umod,
goto free_hdr;
}

staging = get_modinfo(sechdrs, infoindex, "staging");
if (staging) {
add_taint_module(mod, TAINT_CRAP);
printk(KERN_WARNING "%s: module is from the staging directory,"
" the quality is unknown, you have been warned.\n",
mod->name);
}

/* Now copy in args */
args = strndup_user(uargs, ~0UL >> 1);
if (IS_ERR(args)) {
@@ -2126,6 +2164,7 @@ static noinline struct module *load_module(void __user *umod,
markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
markersstringsindex = find_sec(hdr, sechdrs, secstrings,
"__markers_strings");
verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
tracepointsindex = find_sec(hdr, sechdrs, secstrings, "__tracepoints");
tracepointsstringsindex = find_sec(hdr, sechdrs, secstrings,
"__tracepoints_strings");
@@ -2188,6 +2227,7 @@ static noinline struct module *load_module(void __user *umod,
marker_update_probe_range(mod->markers,
mod->markers + mod->num_markers);
#endif
dynamic_printk_setup(sechdrs, verboseindex);
#ifdef CONFIG_TRACEPOINTS
tracepoint_update_probe_range(mod->tracepoints,
mod->tracepoints + mod->num_tracepoints);
@@ -2584,10 +2624,12 @@ static char *module_flags(struct module *mod, char *buf)
mod->state == MODULE_STATE_GOING ||
mod->state == MODULE_STATE_COMING) {
buf[bx++] = '(';
if (mod->taints & TAINT_PROPRIETARY_MODULE)
if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
buf[bx++] = 'P';
if (mod->taints & TAINT_FORCED_MODULE)
if (mod->taints & (1 << TAINT_FORCED_MODULE))
buf[bx++] = 'F';
if (mod->taints & (1 << TAINT_CRAP))
buf[bx++] = 'C';
/*
* TAINT_FORCED_RMMOD: could be added.
* TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't

@@ -23,7 +23,7 @@
#include <linux/kallsyms.h>

int panic_on_oops;
int tainted;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
@@ -143,6 +143,27 @@ NORET_TYPE void panic(const char * fmt, ...)

EXPORT_SYMBOL(panic);


struct tnt {
u8 bit;
char true;
char false;
};

static const struct tnt tnts[] = {
{ TAINT_PROPRIETARY_MODULE, 'P', 'G' },
{ TAINT_FORCED_MODULE, 'F', ' ' },
{ TAINT_UNSAFE_SMP, 'S', ' ' },
{ TAINT_FORCED_RMMOD, 'R', ' ' },
{ TAINT_MACHINE_CHECK, 'M', ' ' },
{ TAINT_BAD_PAGE, 'B', ' ' },
{ TAINT_USER, 'U', ' ' },
{ TAINT_DIE, 'D', ' ' },
{ TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
{ TAINT_WARN, 'W', ' ' },
{ TAINT_CRAP, 'C', ' ' },
};

/**
* print_tainted - return a string to represent the kernel taint state.
*
@@ -155,35 +176,45 @@ EXPORT_SYMBOL(panic);
* 'U' - Userspace-defined naughtiness.
* 'A' - ACPI table overridden.
* 'W' - Taint on warning.
* 'C' - modules from drivers/staging are loaded.
*
* The string is overwritten by the next call to print_taint().
*/

const char *print_tainted(void)
{
static char buf[20];
if (tainted) {
snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c",
tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
tainted & TAINT_BAD_PAGE ? 'B' : ' ',
tainted & TAINT_USER ? 'U' : ' ',
tainted & TAINT_DIE ? 'D' : ' ',
tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ',
tainted & TAINT_WARN ? 'W' : ' ');
}
else
static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];

if (tainted_mask) {
char *s;
int i;

s = buf + sprintf(buf, "Tainted: ");
for (i = 0; i < ARRAY_SIZE(tnts); i++) {
const struct tnt *t = &tnts[i];
*s++ = test_bit(t->bit, &tainted_mask) ?
t->true : t->false;
}
*s = 0;
} else
snprintf(buf, sizeof(buf), "Not tainted");
return(buf);
}

int test_taint(unsigned flag)
{
return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
return tainted_mask;
}

void add_taint(unsigned flag)
{
debug_locks = 0; /* can't trust the integrity of the kernel anymore */
tainted |= flag;
set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
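The rewrite above replaces a hard-coded format string with a table walk over tnts[]. A reduced standalone C sketch of that table-driven formatting, assuming a plain shift-and-mask in place of test_bit():

	/* Illustrative only: one output character per table entry, chosen by
	 * whether the entry's bit is set in the taint mask. */
	#include <stdio.h>

	struct my_tnt { unsigned bit; char on; char off; };

	static const struct my_tnt my_tnts[] = {
		{ 0, 'P', 'G' },   /* proprietary module */
		{ 1, 'F', ' ' },   /* forced load */
	};
	#define N (sizeof(my_tnts) / sizeof(my_tnts[0]))

	int main(void)
	{
		unsigned long mask = 1UL << 1;   /* only "forced" set */
		char buf[N + 1];
		unsigned i;

		for (i = 0; i < N; i++)
			buf[i] = (mask >> my_tnts[i].bit) & 1 ?
				 my_tnts[i].on : my_tnts[i].off;
		buf[N] = '\0';
		printf("Tainted: %s\n", buf);    /* prints "Tainted: GF" */
		return 0;
	}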

@@ -14,6 +14,7 @@
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -520,6 +521,10 @@ int hibernate(void)
if (error)
goto Exit;

error = usermodehelper_disable();
if (error)
goto Exit;

/* Allocate memory management structures */
error = create_basic_memory_bitmaps();
if (error)
@@ -558,6 +563,7 @@ int hibernate(void)
thaw_processes();
Finish:
free_basic_memory_bitmaps();
usermodehelper_enable();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
pm_restore_console();
@@ -634,6 +640,10 @@ static int software_resume(void)
if (error)
goto Finish;

error = usermodehelper_disable();
if (error)
goto Finish;

error = create_basic_memory_bitmaps();
if (error)
goto Finish;
@@ -656,6 +666,7 @@ static int software_resume(void)
thaw_processes();
Done:
free_basic_memory_bitmaps();
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
pm_restore_console();

@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
@@ -237,6 +238,10 @@ static int suspend_prepare(void)
if (error)
goto Finish;

error = usermodehelper_disable();
if (error)
goto Finish;

if (suspend_freeze_processes()) {
error = -EAGAIN;
goto Thaw;
@@ -256,6 +261,7 @@ static int suspend_prepare(void)

Thaw:
suspend_thaw_processes();
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
@@ -376,6 +382,7 @@ int suspend_devices_and_enter(suspend_state_t state)
static void suspend_finish(void)
{
suspend_thaw_processes();
usermodehelper_enable();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}

@@ -212,13 +212,20 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
case SNAPSHOT_FREEZE:
if (data->frozen)
break;

printk("Syncing filesystems ... ");
sys_sync();
printk("done.\n");

error = freeze_processes();
error = usermodehelper_disable();
if (error)
break;

error = freeze_processes();
if (error) {
thaw_processes();
usermodehelper_enable();
}
if (!error)
data->frozen = 1;
break;
@@ -227,6 +234,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (!data->frozen || data->ready)
break;
thaw_processes();
usermodehelper_enable();
data->frozen = 0;
break;

@@ -13,7 +13,7 @@
* Fixed SMP synchronization, 08/08/99, Manfred Spraul
* manfred@colorfullife.com
* Rewrote bits to get rid of console_lock
* 01Mar01 Andrew Morton <andrewm@uow.edu.au>
* 01Mar01 Andrew Morton
*/

#include <linux/kernel.h>
@@ -577,9 +577,6 @@ static int have_callable_console(void)
* @fmt: format string
*
* This is printk(). It can be called from any context. We want it to work.
* Be aware of the fact that if oops_in_progress is not set, we might try to
* wake klogd up which could deadlock on runqueue lock if printk() is called
* from scheduler code.
*
* We try to grab the console_sem. If we succeed, it's easy - we log the output and
* call the console drivers. If we fail to get the semaphore we place the output
@@ -593,6 +590,8 @@ static int have_callable_console(void)
*
* See also:
* printf(3)
*
* See the vsnprintf() documentation for format string extensions over C99.
*/

asmlinkage int printk(const char *fmt, ...)
@@ -982,10 +981,25 @@ int is_console_locked(void)
return console_locked;
}

static DEFINE_PER_CPU(int, printk_pending);

void printk_tick(void)
{
if (__get_cpu_var(printk_pending)) {
__get_cpu_var(printk_pending) = 0;
wake_up_interruptible(&log_wait);
}
}

int printk_needs_cpu(int cpu)
{
return per_cpu(printk_pending, cpu);
}

void wake_up_klogd(void)
{
if (!oops_in_progress && waitqueue_active(&log_wait))
wake_up_interruptible(&log_wait);
if (waitqueue_active(&log_wait))
__raw_get_cpu_var(printk_pending) = 1;
}
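After this change wake_up_klogd() only marks a per-CPU flag, and printk_tick(), called from the timer tick, performs the actual wakeup, so printk() no longer risks waking klogd from scheduler context. A minimal sketch of that defer-to-tick pattern, with a plain global standing in for the per-CPU variable:

	/* Illustrative only: note work in an unsafe context, act on it later
	 * from a safe periodic hook. */
	#include <stdio.h>

	static int pending;                /* stand-in for per-CPU printk_pending */

	static void my_wake_up_klogd(void) /* may run where a real wakeup is unsafe */
	{
		pending = 1;               /* just note that work is wanted */
	}

	static void my_printk_tick(void)   /* runs later, from the timer tick */
	{
		if (pending) {
			pending = 0;
			printf("waking log readers\n");  /* the deferred wakeup */
		}
	}

	int main(void)
	{
		my_wake_up_klogd();
		my_printk_tick();
		return 0;
	}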

/**

@@ -22,6 +22,8 @@
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
@@ -50,11 +52,11 @@ static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

static int __init profile_setup(char *str)
int profile_setup(char *str)
{
static char __initdata schedstr[] = "schedule";
static char __initdata sleepstr[] = "sleep";
static char __initdata kvmstr[] = "kvm";
static char schedstr[] = "schedule";
static char sleepstr[] = "sleep";
static char kvmstr[] = "kvm";
int par;

if (!strncmp(str, sleepstr, strlen(sleepstr))) {
@@ -100,14 +102,33 @@ static int __init profile_setup(char *str)
__setup("profile=", profile_setup);


void __init profile_init(void)
int profile_init(void)
{
int buffer_bytes;
if (!prof_on)
return;
return 0;

/* only text is profiled */
prof_len = (_etext - _stext) >> prof_shift;
prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
buffer_bytes = prof_len*sizeof(atomic_t);
if (!slab_is_available()) {
prof_buffer = alloc_bootmem(buffer_bytes);
return 0;
}

prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
if (prof_buffer)
return 0;

prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
if (prof_buffer)
return 0;

prof_buffer = vmalloc(buffer_bytes);
if (prof_buffer)
return 0;

return -ENOMEM;
}
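profile_init() now tries progressively more permissive allocators before giving up. A sketch of the same fallback chain, with malloc-based stubs standing in for kzalloc(), alloc_pages_exact() and vmalloc():

	/* Illustrative only: try the cheapest allocator first, fall back to
	 * more flexible ones, and let the caller map NULL to -ENOMEM. */
	#include <stdlib.h>

	static void *alloc_contiguous(size_t n) { return malloc(n); } /* ~kzalloc */
	static void *alloc_pages_like(size_t n) { return malloc(n); } /* ~alloc_pages_exact */
	static void *alloc_virtual(size_t n)    { return malloc(n); } /* ~vmalloc */

	static void *profile_buffer_alloc(size_t n)
	{
		void *p;

		if ((p = alloc_contiguous(n)))  /* fastest, may fail when fragmented */
			return p;
		if ((p = alloc_pages_like(n)))  /* page-granular fallback */
			return p;
		return alloc_virtual(n);        /* virtually contiguous last resort */
	}

	int main(void)
	{
		void *buf = profile_buffer_alloc(4096);
		int ok = buf != NULL;           /* NULL maps to -ENOMEM above */
		free(buf);
		return ok ? 0 : 1;
	}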

/* Profile event notifications */
@@ -527,7 +548,7 @@ static void __init profile_nop(void *unused)
{
}

static int __init create_hash_tables(void)
static int create_hash_tables(void)
{
int cpu;

@@ -575,14 +596,14 @@ out_cleanup:
#define create_hash_tables() ({ 0; })
#endif

static int __init create_proc_profile(void)
int create_proc_profile(void)
{
struct proc_dir_entry *entry;

if (!prof_on)
return 0;
if (create_hash_tables())
return -1;
return -ENOMEM;
entry = proc_create("profile", S_IWUSR | S_IRUGO,
NULL, &proc_profile_operations);
if (!entry)

@@ -38,10 +38,6 @@ EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
struct resource *p = v;
@@ -53,6 +49,10 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
__acquires(resource_lock)
{
@@ -549,13 +549,9 @@ static void __init __reserve_region_with_split(struct resource *root,
}

if (!res) {
printk(KERN_DEBUG " __reserve_region_with_split: (%s) [%llx, %llx], res: (%s) [%llx, %llx]\n",
conflict->name, conflict->start, conflict->end,
name, start, end);

/* failed, split and try again */

/* conflict coverred whole area */
/* conflict covered whole area */
if (conflict->start <= start && conflict->end >= end)
return;

@@ -630,33 +626,34 @@ struct resource * __request_region(struct resource *parent,
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

if (res) {
res->name = name;
res->start = start;
res->end = start + n - 1;
res->flags = IORESOURCE_BUSY;
if (!res)
return NULL;

write_lock(&resource_lock);
res->name = name;
res->start = start;
res->end = start + n - 1;
res->flags = IORESOURCE_BUSY;

for (;;) {
struct resource *conflict;
write_lock(&resource_lock);

conflict = __request_resource(parent, res);
if (!conflict)
break;
if (conflict != parent) {
parent = conflict;
if (!(conflict->flags & IORESOURCE_BUSY))
continue;
}
for (;;) {
struct resource *conflict;

/* Uhhuh, that didn't work out.. */
kfree(res);
res = NULL;
conflict = __request_resource(parent, res);
if (!conflict)
break;
if (conflict != parent) {
parent = conflict;
if (!(conflict->flags & IORESOURCE_BUSY))
continue;
}
write_unlock(&resource_lock);

/* Uhhuh, that didn't work out.. */
kfree(res);
res = NULL;
break;
}
write_unlock(&resource_lock);
return res;
}
EXPORT_SYMBOL(__request_region);
@@ -831,3 +828,40 @@ static int __init reserve_setup(char *str)
}

__setup("reserve=", reserve_setup);

/*
* Check if the requested addr and size spans more than any slot in the
* iomem resource tree.
*/
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
struct resource *p = &iomem_resource;
int err = 0;
loff_t l;

read_lock(&resource_lock);
for (p = p->child; p ; p = r_next(NULL, p, &l)) {
/*
* We can probably skip the resources without
* IORESOURCE_IO attribute?
*/
if (p->start >= addr + size)
continue;
if (p->end < addr)
continue;
if (p->start <= addr && (p->end >= addr + size - 1))
continue;
printk(KERN_WARNING "resource map sanity check conflict: "
"0x%llx 0x%llx 0x%llx 0x%llx %s\n",
(unsigned long long)addr,
(unsigned long long)(addr + size - 1),
(unsigned long long)p->start,
(unsigned long long)p->end,
p->name);
err = -1;
break;
}
read_unlock(&resource_lock);

return err;
}

@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)

/*
* scd->clock = clamp(scd->tick_gtod + delta,
* max(scd->tick_gtod, scd->clock),
* scd->tick_gtod + TICK_NSEC);
* max(scd->tick_gtod, scd->clock),
* max(scd->clock, scd->tick_gtod + TICK_NSEC));
*/

clock = scd->tick_gtod + delta;
min_clock = wrap_max(scd->tick_gtod, scd->clock);
max_clock = scd->tick_gtod + TICK_NSEC;
max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);

clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
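The one-line change widens the upper clamp so the per-cpu clock is never forced backwards when it is already ahead of tick_gtod + TICK_NSEC. A standalone numeric sketch (wrap-safe u64 min/max reduced to plain comparisons, values invented for illustration):

	/* Illustrative only: with the old bound (tick_gtod + TICK) the result
	 * would drop below the previous clock; the new bound prevents that. */
	#include <stdio.h>

	typedef unsigned long long u64;

	static u64 max64(u64 a, u64 b) { return a > b ? a : b; }
	static u64 min64(u64 a, u64 b) { return a < b ? a : b; }

	int main(void)
	{
		u64 tick_gtod = 1000, old_clock = 1300, delta = 50, tick = 100;

		u64 clock = tick_gtod + delta;               /* 1050 */
		u64 lo = max64(tick_gtod, old_clock);        /* 1300 */
		u64 hi = max64(old_clock, tick_gtod + tick); /* 1300; old code: 1100 */

		clock = min64(max64(clock, lo), hi);         /* 1300: never backwards */
		printf("%llu\n", clock);
		return 0;
	}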

@@ -333,12 +333,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
unsigned long flags;
int num_threads = 1;

rcu_read_lock();
if (lock_task_sighand(p, &flags)) {
num_threads = atomic_read(&p->signal->count);
unlock_task_sighand(p, &flags);
}
rcu_read_unlock();

SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
SEQ_printf(m,

kernel/softirq.c
@@ -6,6 +6,8 @@
* Distribute under GPLv2.
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*
* Remote softirq infrastructure is by Jens Axboe.
*/

#include <linux/module.h>
@@ -46,7 +48,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

@@ -205,7 +207,18 @@ restart:

do {
if (pending & 1) {
int prev_count = preempt_count();

h->action(h);

if (unlikely(prev_count != preempt_count())) {
printk(KERN_ERR "huh, entered softirq %td %p"
"with preempt_count %08x,"
" exited with %08x?\n", h - softirq_vec,
h->action, prev_count, preempt_count());
preempt_count() = prev_count;
}

rcu_bh_qsctr_inc(cpu);
}
h++;
@@ -463,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

list_add_tail(&cp->list, head);

/* Trigger the softirq only if the list was previously empty. */
if (head->next == &cp->list)
raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
struct call_single_data *cp = data;
unsigned long flags;
int softirq;

softirq = cp->priv;

local_irq_save(flags);
__local_trigger(cp, softirq);
local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
if (cpu_online(cpu)) {
cp->func = remote_softirq_receive;
cp->info = cp;
cp->flags = 0;
cp->priv = softirq;

__smp_call_function_single(cpu, cp);
return 0;
}
return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
return 1;
}
#endif

/**
* __send_remote_softirq - try to schedule softirq work on a remote cpu
* @cp: private SMP call function data area
* @cpu: the remote cpu
* @this_cpu: the currently executing cpu
* @softirq: the softirq for the work
*
* Attempt to schedule softirq work on a remote cpu. If this cannot be
* done, the work is instead queued up on the local cpu.
*
* Interrupts must be disabled.
*/
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
* send_remote_softirq - try to schedule softirq work on a remote cpu
* @cp: private SMP call function data area
* @cpu: the remote cpu
* @softirq: the softirq for the work
*
* Like __send_remote_softirq except that disabling interrupts and
* computing the current cpu is done for the caller.
*/
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
unsigned long flags;
int this_cpu;

local_irq_save(flags);
this_cpu = smp_processor_id();
__send_remote_softirq(cp, cpu, this_cpu, softirq);
local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
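The pairing above always leaves the work queued somewhere: if the remote cpu cannot take it, it lands on the local per-cpu list. A reduced sketch of that shape, with trivial stand-ins for the cpu plumbing:

	/* Illustrative only: try the remote path, else fall back locally so
	 * the work remains runnable either way. */
	#include <stdio.h>

	static int try_remote(int cpu)     /* ~__try_remote_softirq(): 0 = handed off */
	{
		(void)cpu;
		return 1;                  /* pretend the remote path failed */
	}

	static void queue_local(void)      /* ~__local_trigger() */
	{
		printf("queued on local cpu\n");
	}

	static void send(int cpu, int this_cpu)
	{
		if (cpu == this_cpu || try_remote(cpu))
			queue_local();     /* fallback keeps the work runnable */
	}

	int main(void)
	{
		send(2, 0);
		return 0;
	}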

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
* and trigger a run of the softirq
*/
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
int cpu = (unsigned long) hcpu;
int i;

local_irq_disable();
for (i = 0; i < NR_SOFTIRQS; i++) {
struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
struct list_head *local_head;

if (list_empty(head))
continue;

local_head = &__get_cpu_var(softirq_work_list[i]);
list_splice_init(head, local_head);
raise_softirq_irqoff(i);
}
local_irq_enable();
}

return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
.notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
int cpu;

for_each_possible_cpu(cpu) {
int i;

per_cpu(tasklet_vec, cpu).tail =
&per_cpu(tasklet_vec, cpu).head;
per_cpu(tasklet_hi_vec, cpu).tail =
&per_cpu(tasklet_hi_vec, cpu).head;
for (i = 0; i < NR_SOFTIRQS; i++)
INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
}

register_hotcpu_notifier(&remote_softirq_cpu_notifier);

open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

@@ -226,7 +226,7 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
* If the system crashed already then all bets are off,
* do not report extra hung tasks:
*/
if ((tainted & TAINT_DIE) || did_panic)
if (test_taint(TAINT_DIE) || did_panic)
return;

read_lock(&tasklist_lock);

kernel/sys.c
@@ -1349,8 +1349,10 @@ asmlinkage long sys_sethostname(char __user *name, int len)
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
memcpy(utsname()->nodename, tmp, len);
utsname()->nodename[len] = 0;
struct new_utsname *u = utsname();

memcpy(u->nodename, tmp, len);
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
}
up_write(&uts_sem);
@@ -1362,15 +1364,17 @@ asmlinkage long sys_sethostname(char __user *name, int len)
asmlinkage long sys_gethostname(char __user *name, int len)
{
int i, errno;
struct new_utsname *u;

if (len < 0)
return -EINVAL;
down_read(&uts_sem);
i = 1 + strlen(utsname()->nodename);
u = utsname();
i = 1 + strlen(u->nodename);
if (i > len)
i = len;
errno = 0;
if (copy_to_user(name, utsname()->nodename, i))
if (copy_to_user(name, u->nodename, i))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
@@ -1395,8 +1399,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
memcpy(utsname()->domainname, tmp, len);
utsname()->domainname[len] = 0;
struct new_utsname *u = utsname();

memcpy(u->domainname, tmp, len);
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
}
up_write(&uts_sem);
@@ -1450,14 +1456,22 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
return -EINVAL;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
if (new_rlim.rlim_cur > new_rlim.rlim_max)
return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
return -EPERM;

if (resource == RLIMIT_NOFILE) {
if (new_rlim.rlim_max == RLIM_INFINITY)
new_rlim.rlim_max = sysctl_nr_open;
if (new_rlim.rlim_cur == RLIM_INFINITY)
new_rlim.rlim_cur = sysctl_nr_open;
if (new_rlim.rlim_max > sysctl_nr_open)
return -EPERM;
}

if (new_rlim.rlim_cur > new_rlim.rlim_max)
return -EINVAL;

retval = security_task_setrlimit(resource, &new_rlim);
if (retval)

@@ -125,6 +125,12 @@ cond_syscall(sys_vm86old);
cond_syscall(sys_vm86);
cond_syscall(compat_sys_ipc);
cond_syscall(compat_sys_sysctl);
cond_syscall(sys_flock);
cond_syscall(sys_io_setup);
cond_syscall(sys_io_destroy);
cond_syscall(sys_io_submit);
cond_syscall(sys_io_cancel);
cond_syscall(sys_io_getevents);

/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);

kernel/sysctl.c
@@ -80,7 +80,6 @@ extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
extern int compat_log;
extern int maps_protect;
extern int latencytop_enabled;
extern int sysctl_nr_open_min, sysctl_nr_open_max;
#ifdef CONFIG_RCU_TORTURE_TEST
@@ -97,7 +96,7 @@ static int sixty = 60;
static int neg_one = -1;
#endif

#ifdef CONFIG_MMU
#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
static int two = 2;
#endif

@@ -150,7 +149,7 @@ extern int max_lock_depth;
#ifdef CONFIG_PROC_SYSCTL
static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
static int proc_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif

@@ -380,10 +379,9 @@ static struct ctl_table kern_table[] = {
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
.data = &tainted,
.maxlen = sizeof(int),
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = &proc_dointvec_taint,
.proc_handler = &proc_taint,
},
#endif
#ifdef CONFIG_LATENCYTOP
@@ -807,16 +805,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
#ifdef CONFIG_PROC_FS
{
.ctl_name = CTL_UNNUMBERED,
.procname = "maps_protect",
.data = &maps_protect,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
{
.ctl_name = CTL_UNNUMBERED,
@@ -1259,6 +1247,7 @@ static struct ctl_table fs_table[] = {
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
#ifdef CONFIG_FILE_LOCKING
{
.ctl_name = FS_LEASES,
.procname = "leases-enable",
@@ -1267,6 +1256,7 @@ static struct ctl_table fs_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
#ifdef CONFIG_DNOTIFY
{
.ctl_name = FS_DIR_NOTIFY,
@@ -1278,6 +1268,7 @@ static struct ctl_table fs_table[] = {
},
#endif
#ifdef CONFIG_MMU
#ifdef CONFIG_FILE_LOCKING
{
.ctl_name = FS_LEASE_TIME,
.procname = "lease-break-time",
@@ -1289,6 +1280,8 @@ static struct ctl_table fs_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
#endif
#ifdef CONFIG_AIO
{
.procname = "aio-nr",
.data = &aio_nr,
@@ -1303,6 +1296,7 @@ static struct ctl_table fs_table[] = {
.mode = 0644,
.proc_handler = &proc_doulongvec_minmax,
},
#endif /* CONFIG_AIO */
#ifdef CONFIG_INOTIFY_USER
{
.ctl_name = FS_INOTIFY,
@@ -1508,7 +1502,6 @@ void register_sysctl_root(struct ctl_table_root *root)
/* Perform the actual read/write of a sysctl table entry. */
static int do_sysctl_strategy(struct ctl_table_root *root,
struct ctl_table *table,
int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -1522,8 +1515,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
return -EPERM;

if (table->strategy) {
rc = table->strategy(table, name, nlen, oldval, oldlenp,
newval, newlen);
rc = table->strategy(table, oldval, oldlenp, newval, newlen);
if (rc < 0)
return rc;
if (rc > 0)
@@ -1533,8 +1525,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
/* If there is no strategy routine, or if the strategy returns
* zero, proceed with automatic r/w */
if (table->data && table->maxlen) {
rc = sysctl_data(table, name, nlen, oldval, oldlenp,
newval, newlen);
rc = sysctl_data(table, oldval, oldlenp, newval, newlen);
if (rc < 0)
return rc;
}
@@ -1566,7 +1557,7 @@ repeat:
table = table->child;
goto repeat;
}
error = do_sysctl_strategy(root, table, name, nlen,
error = do_sysctl_strategy(root, table,
oldval, oldlenp,
newval, newlen);
return error;
@@ -2235,49 +2226,39 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
NULL,NULL);
}

#define OP_SET 0
#define OP_AND 1
#define OP_OR 2

static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
int *valp,
int write, void *data)
{
int op = *(int *)data;
if (write) {
int val = *negp ? -*lvalp : *lvalp;
switch(op) {
case OP_SET: *valp = val; break;
case OP_AND: *valp &= val; break;
case OP_OR: *valp |= val; break;
}
} else {
int val = *valp;
if (val < 0) {
*negp = -1;
*lvalp = (unsigned long)-val;
} else {
*negp = 0;
*lvalp = (unsigned long)val;
}
}
return 0;
}

/*
* Taint values can only be increased
* Taint values can only be increased
* This means we can safely use a temporary.
*/
static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
static int proc_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int op;
struct ctl_table t;
unsigned long tmptaint = get_taint();
int err;

if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;

op = OP_OR;
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
do_proc_dointvec_bset_conv,&op);
t = *table;
t.data = &tmptaint;
err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos);
if (err < 0)
return err;

if (write) {
/*
* Poor man's atomic or. Not worth adding a primitive
* to everyone's atomic.h for this
*/
int i;
for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
if ((tmptaint >> i) & 1)
add_taint(i);
}
}

return err;
}
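The write path above converts the submitted mask into individual add_taint() calls, one bit at a time, which is why taint bits can only ever be set, never cleared. A standalone sketch of that loop:

	/* Illustrative only: the "poor man's atomic or" shape, with printf
	 * standing in for add_taint(). */
	#include <stdio.h>

	#define MY_BITS_PER_LONG (8 * (int)sizeof(long))

	static void my_add_taint(int bit)
	{
		printf("set bit %d\n", bit);
	}

	int main(void)
	{
		unsigned long tmptaint = 0x5;   /* bits 0 and 2 requested */
		int i;

		for (i = 0; i < MY_BITS_PER_LONG && tmptaint >> i; i++)
			if ((tmptaint >> i) & 1)
				my_add_taint(i); /* per-bit set; nothing is cleared */
		return 0;
	}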

struct do_proc_dointvec_minmax_conv_param {
@@ -2725,7 +2706,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
*/

/* The generic sysctl data routine (used if no strategy routine supplied) */
int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
int sysctl_data(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2759,7 +2740,7 @@ int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
}

/* The generic string strategy routine: */
int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
int sysctl_string(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2805,7 +2786,7 @@ int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
* are between the minimum and maximum values given in the arrays
* table->extra1 and table->extra2, respectively.
*/
int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
int sysctl_intvec(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2841,7 +2822,7 @@ int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
}

/* Strategy function to convert jiffies to seconds */
int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
int sysctl_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2875,7 +2856,7 @@ int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
}

/* Strategy function to convert jiffies to seconds */
int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
int sysctl_ms_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2930,35 +2911,35 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
return error;
}

int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
int sysctl_data(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}

int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
int sysctl_string(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}

int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
int sysctl_intvec(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}

int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
int sysctl_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}

int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
int sysctl_ms_jiffies(struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{

@@ -3,7 +3,6 @@
#
config TICK_ONESHOT
bool
default n

config NO_HZ
bool "Tickless System (Dynamic Ticks)"

@@ -270,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;

if (rcu_needs_cpu(cpu))
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
delta_jiffies = 1;
/*
* Do not stop the tick, if we are only one off

@@ -978,6 +978,7 @@ void update_process_times(int user_tick)
run_local_timers();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
printk_tick();
scheduler_tick();
run_posix_cpu_timers(p);
}

@@ -60,7 +60,7 @@ static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,

#ifdef CONFIG_SYSCTL_SYSCALL
/* The generic string strategy routine: */
static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
static int sysctl_uts_string(ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -69,8 +69,7 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
write = newval && newlen;
memcpy(&uts_table, table, sizeof(uts_table));
uts_table.data = get_uts(table, write);
r = sysctl_string(&uts_table, name, nlen,
oldval, oldlenp, newval, newlen);
r = sysctl_string(&uts_table, oldval, oldlenp, newval, newlen);
put_uts(table, write, uts_table.data);
return r;
}

@@ -72,12 +72,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
/*
* don't alter the task state if this is just going to
* queue an async wait queue callback
*/
if (is_sync_wait(wait))
set_current_state(state);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
@@ -91,12 +86,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue_tail(q, wait);
/*
* don't alter the task state if this is just going to
* queue an async wait queue callback
*/
if (is_sync_wait(wait))
set_current_state(state);
set_current_state(state);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

@@ -9,7 +9,7 @@
* Derived from the taskqueue/keventd code by:
*
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton <andrewm@uow.edu.au>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*