Merge branch 'akpm' (incoming from Andrew)
Merge third batch of fixes from Andrew Morton:
 "Most of the rest.  I still have two large patchsets against AIO and
  IPC, but they're a bit stuck behind other trees and I'm about to
  vanish for six days.

   - random fixlets
   - inotify
   - more of the MM queue
   - show_stack() cleanups
   - DMI update
   - kthread/workqueue things
   - compat cleanups
   - epoll updates
   - binfmt updates
   - nilfs2
   - hfs
   - hfsplus
   - ptrace
   - kmod
   - coredump
   - kexec
   - rbtree
   - pids
   - pidns
   - pps
   - semaphore tweaks
   - some w1 patches
   - relay updates
   - core Kconfig changes
   - sysrq tweaks"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
  Documentation/sysrq: fix inconstistent help message of sysrq key
  ethernet/emac/sysrq: fix inconstistent help message of sysrq key
  sparc/sysrq: fix inconstistent help message of sysrq key
  powerpc/xmon/sysrq: fix inconstistent help message of sysrq key
  ARM/etm/sysrq: fix inconstistent help message of sysrq key
  power/sysrq: fix inconstistent help message of sysrq key
  kgdb/sysrq: fix inconstistent help message of sysrq key
  lib/decompress.c: fix initconst
  notifier-error-inject: fix module names in Kconfig
  kernel/sys.c: make prctl(PR_SET_MM) generally available
  UAPI: remove empty Kbuild files
  menuconfig: print more info for symbol without prompts
  init/Kconfig: re-order CONFIG_EXPERT options to fix menuconfig display
  kconfig menu: move Virtualization drivers near other virtualization options
  Kconfig: consolidate CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
  relay: use macro PAGE_ALIGN instead of FIX_SIZE
  kernel/relay.c: move FIX_SIZE macro into relay.c
  kernel/relay.c: remove unused function argument actor
  drivers/w1/slaves/w1_ds2760.c: fix the error handling in w1_ds2760_add_slave()
  drivers/w1/slaves/w1_ds2781.c: fix the error handling in w1_ds2781_add_slave()
  ...
@@ -1138,71 +1138,6 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
 }
 #endif
 
-struct compat_sysinfo {
-	s32 uptime;
-	u32 loads[3];
-	u32 totalram;
-	u32 freeram;
-	u32 sharedram;
-	u32 bufferram;
-	u32 totalswap;
-	u32 freeswap;
-	u16 procs;
-	u16 pad;
-	u32 totalhigh;
-	u32 freehigh;
-	u32 mem_unit;
-	char _f[20-2*sizeof(u32)-sizeof(int)];
-};
-
-asmlinkage long
-compat_sys_sysinfo(struct compat_sysinfo __user *info)
-{
-	struct sysinfo s;
-
-	do_sysinfo(&s);
-
-	/* Check to see if any memory value is too large for 32-bit and scale
-	 *  down if needed
-	 */
-	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
-		int bitcount = 0;
-
-		while (s.mem_unit < PAGE_SIZE) {
-			s.mem_unit <<= 1;
-			bitcount++;
-		}
-
-		s.totalram >>= bitcount;
-		s.freeram >>= bitcount;
-		s.sharedram >>= bitcount;
-		s.bufferram >>= bitcount;
-		s.totalswap >>= bitcount;
-		s.freeswap >>= bitcount;
-		s.totalhigh >>= bitcount;
-		s.freehigh >>= bitcount;
-	}
-
-	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
-	    __put_user (s.uptime, &info->uptime) ||
-	    __put_user (s.loads[0], &info->loads[0]) ||
-	    __put_user (s.loads[1], &info->loads[1]) ||
-	    __put_user (s.loads[2], &info->loads[2]) ||
-	    __put_user (s.totalram, &info->totalram) ||
-	    __put_user (s.freeram, &info->freeram) ||
-	    __put_user (s.sharedram, &info->sharedram) ||
-	    __put_user (s.bufferram, &info->bufferram) ||
-	    __put_user (s.totalswap, &info->totalswap) ||
-	    __put_user (s.freeswap, &info->freeswap) ||
-	    __put_user (s.procs, &info->procs) ||
-	    __put_user (s.totalhigh, &info->totalhigh) ||
-	    __put_user (s.freehigh, &info->freehigh) ||
-	    __put_user (s.mem_unit, &info->mem_unit))
-		return -EFAULT;
-
-	return 0;
-}
-
 COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
 		       compat_pid_t, pid,
 		       struct compat_timespec __user *, interval)
@@ -775,7 +775,7 @@ static void sysrq_handle_dbg(int key)
 
 static struct sysrq_key_op sysrq_dbg_op = {
 	.handler	= sysrq_handle_dbg,
-	.help_msg	= "debug(G)",
+	.help_msg	= "debug(g)",
 	.action_msg	= "DEBUG",
 };
 #endif
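
The sysrq patches in this batch all normalize .help_msg to the lowercase
"name(key)" form, so the help line printed by sysrq consistently shows the
trigger key in parentheses. For reference, a minimal handler following the
same convention might look like this (illustrative only; the handler, its
messages, and the chosen key are invented for the example):

	#include <linux/sysrq.h>
	#include <linux/printk.h>

	static void sysrq_handle_hello(int key)
	{
		pr_info("sysrq: example handler fired for key '%c'\n", key);
	}

	static struct sysrq_key_op sysrq_hello_op = {
		.handler	= sysrq_handle_hello,
		.help_msg	= "hello(x)",	/* name, then trigger key */
		.action_msg	= "HELLO",
	};

	/* from init code: register_sysrq_key('x', &sysrq_hello_op); */
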
@@ -786,7 +786,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 					struct kexec_segment *segment)
 {
 	unsigned long maddr;
-	unsigned long ubytes, mbytes;
+	size_t ubytes, mbytes;
 	int result;
 	unsigned char __user *buf;
 
@@ -819,13 +819,9 @@ static int kimage_load_normal_segment(struct kimage *image,
 		/* Start with a clear page */
 		clear_page(ptr);
 		ptr += maddr & ~PAGE_MASK;
-		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
-		if (mchunk > mbytes)
-			mchunk = mbytes;
-
-		uchunk = mchunk;
-		if (uchunk > ubytes)
-			uchunk = ubytes;
+		mchunk = min_t(size_t, mbytes,
+				PAGE_SIZE - (maddr & ~PAGE_MASK));
+		uchunk = min(ubytes, mchunk);
 
 		result = copy_from_user(ptr, buf, uchunk);
 		kunmap(page);
@@ -850,7 +846,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 	 * We do things a page at a time for the sake of kmap.
 	 */
 	unsigned long maddr;
-	unsigned long ubytes, mbytes;
+	size_t ubytes, mbytes;
 	int result;
 	unsigned char __user *buf;
 
@@ -871,13 +867,10 @@ static int kimage_load_crash_segment(struct kimage *image,
 		}
 		ptr = kmap(page);
 		ptr += maddr & ~PAGE_MASK;
-		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
-		if (mchunk > mbytes)
-			mchunk = mbytes;
-
-		uchunk = mchunk;
-		if (uchunk > ubytes) {
-			uchunk = ubytes;
+		mchunk = min_t(size_t, mbytes,
+				PAGE_SIZE - (maddr & ~PAGE_MASK));
+		uchunk = min(ubytes, mchunk);
+		if (mchunk > uchunk) {
 			/* Zero the trailing part of the page */
 			memset(ptr + uchunk, 0, mchunk - uchunk);
 		}
@@ -1540,14 +1533,13 @@ void vmcoreinfo_append_str(const char *fmt, ...)
 {
 	va_list args;
 	char buf[0x50];
-	int r;
+	size_t r;
 
 	va_start(args, fmt);
 	r = vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
 
-	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
-		r = vmcoreinfo_max_size - vmcoreinfo_size;
+	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
 
 	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
 
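
The kexec hunks above swap the open-coded "compute then clamp" sequences
for min()/min_t(). kernel.h's min() refuses mixed types at compile time,
while min_t(type, a, b) evaluates both sides as the named type first, which
is why the bound mixing size_t with the PAGE_SIZE arithmetic uses
min_t(size_t, ...). A stand-alone sketch of the pattern (not kernel code;
the function and its arguments are invented):

	#include <linux/kernel.h>	/* min(), min_t() */
	#include <asm/page.h>		/* PAGE_SIZE, PAGE_MASK */

	static size_t copy_chunk(unsigned long maddr, size_t mbytes, size_t ubytes)
	{
		/* bytes left in the destination page, clamped to the segment */
		size_t mchunk = min_t(size_t, mbytes,
				      PAGE_SIZE - (maddr & ~PAGE_MASK));

		/* and never more than the user buffer provides */
		return min(ubytes, mchunk);
	}
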
kernel/kmod.c (104 lines changed)
@@ -77,6 +77,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
 
 static int call_modprobe(char *module_name, int wait)
 {
+	struct subprocess_info *info;
 	static char *envp[] = {
 		"HOME=/",
 		"TERM=linux",
@@ -98,8 +99,15 @@ static int call_modprobe(char *module_name, int wait)
 	argv[3] = module_name;	/* check free_modprobe_argv() */
 	argv[4] = NULL;
 
-	return call_usermodehelper_fns(modprobe_path, argv, envp,
-		wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
+	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
+					 NULL, free_modprobe_argv, NULL);
+	if (!info)
+		goto free_module_name;
+
+	return call_usermodehelper_exec(info, wait | UMH_KILLABLE);
+
+free_module_name:
+	kfree(module_name);
 free_argv:
 	kfree(argv);
 out:
@@ -502,34 +510,13 @@ static void helper_unlock(void)
  * @argv: arg vector for process
  * @envp: environment for process
  * @gfp_mask: gfp mask for memory allocation
+ * @cleanup: a cleanup function
+ * @init: an init function
+ * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
- */
-static
-struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
-						  char **envp, gfp_t gfp_mask)
-{
-	struct subprocess_info *sub_info;
-	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
-	if (!sub_info)
-		goto out;
-
-	INIT_WORK(&sub_info->work, __call_usermodehelper);
-	sub_info->path = path;
-	sub_info->argv = argv;
-	sub_info->envp = envp;
-out:
-	return sub_info;
-}
-
-/**
- * call_usermodehelper_setfns - set a cleanup/init function
- * @info: a subprocess_info returned by call_usermodehelper_setup
- * @cleanup: a cleanup function
- * @init: an init function
- * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
@@ -540,30 +527,42 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
 * Function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
-static
-void call_usermodehelper_setfns(struct subprocess_info *info,
-		int (*init)(struct subprocess_info *info, struct cred *new),
-		void (*cleanup)(struct subprocess_info *info),
-		void *data)
+struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
+		char **envp, gfp_t gfp_mask,
+		int (*init)(struct subprocess_info *info, struct cred *new),
+		void (*cleanup)(struct subprocess_info *info),
+		void *data)
 {
-	info->cleanup = cleanup;
-	info->init = init;
-	info->data = data;
+	struct subprocess_info *sub_info;
+	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
+	if (!sub_info)
+		goto out;
+
+	INIT_WORK(&sub_info->work, __call_usermodehelper);
+	sub_info->path = path;
+	sub_info->argv = argv;
+	sub_info->envp = envp;
+
+	sub_info->cleanup = cleanup;
+	sub_info->init = init;
+	sub_info->data = data;
+out:
+	return sub_info;
 }
+EXPORT_SYMBOL(call_usermodehelper_setup);
 
 /**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocessa
 * @wait: wait for the application to finish and return status.
- *        when -1 don't wait at all, but you get no useful error back when
- *        the program couldn't be exec'ed. This makes it safe to call
+ *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
+ *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
-static
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
@@ -615,31 +614,34 @@ unlock:
 	helper_unlock();
 	return retval;
 }
+EXPORT_SYMBOL(call_usermodehelper_exec);
 
-/*
- * call_usermodehelper_fns() will not run the caller-provided cleanup function
- * if a memory allocation failure is experienced. So the caller might need to
- * check the call_usermodehelper_fns() return value: if it is -ENOMEM, perform
- * the necessaary cleanup within the caller.
+/**
+ * call_usermodehelper() - prepare and start a usermode application
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
+ * @wait: wait for the application to finish and return status.
+ *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
+ *        when the program couldn't be exec'ed. This makes it safe to call
+ *        from interrupt context.
+ *
+ * This function is the equivalent to use call_usermodehelper_setup() and
+ * call_usermodehelper_exec().
 */
-int call_usermodehelper_fns(
-	char *path, char **argv, char **envp, int wait,
-	int (*init)(struct subprocess_info *info, struct cred *new),
-	void (*cleanup)(struct subprocess_info *), void *data)
+int call_usermodehelper(char *path, char **argv, char **envp, int wait)
 {
 	struct subprocess_info *info;
 	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
 
-	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
-
+	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
+					 NULL, NULL, NULL);
 	if (info == NULL)
 		return -ENOMEM;
 
-	call_usermodehelper_setfns(info, init, cleanup, data);
-
 	return call_usermodehelper_exec(info, wait);
 }
-EXPORT_SYMBOL(call_usermodehelper_fns);
+EXPORT_SYMBOL(call_usermodehelper);
 
 static int proc_cap_handler(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
+#include <linux/uaccess.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -135,6 +136,24 @@ void *kthread_data(struct task_struct *task)
 	return to_kthread(task)->data;
 }
 
+/**
+ * probe_kthread_data - speculative version of kthread_data()
+ * @task: possible kthread task in question
+ *
+ * @task could be a kthread task.  Return the data value specified when it
+ * was created if accessible.  If @task isn't a kthread task or its data is
+ * inaccessible for any reason, %NULL is returned.  This function requires
+ * that @task itself is safe to dereference.
+ */
+void *probe_kthread_data(struct task_struct *task)
+{
+	struct kthread *kthread = to_kthread(task);
+	void *data = NULL;
+
+	probe_kernel_read(&data, &kthread->data, sizeof(data));
+	return data;
+}
+
 static void __kthread_parkme(struct kthread *self)
 {
 	__set_current_state(TASK_PARKED);
@@ -22,7 +22,6 @@
 #include <linux/sysrq.h>
 #include <linux/init.h>
 #include <linux/nmi.h>
-#include <linux/dmi.h>
 
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
@@ -400,13 +399,8 @@ struct slowpath_args {
 static void warn_slowpath_common(const char *file, int line, void *caller,
 				 unsigned taint, struct slowpath_args *args)
 {
-	const char *board;
-
 	printk(KERN_WARNING "------------[ cut here ]------------\n");
 	printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
-	board = dmi_get_system_info(DMI_PRODUCT_NAME);
-	if (board)
-		printk(KERN_WARNING "Hardware name: %s\n", board);
 
 	if (args)
 		vprintk(args->fmt, args->args);
kernel/pid.c (11 lines changed)
@@ -51,9 +51,6 @@ int pid_max = PID_MAX_DEFAULT;
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
 
-#define BITS_PER_PAGE		(PAGE_SIZE*8)
-#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
-
 static inline int mk_pid(struct pid_namespace *pid_ns,
 			 struct pidmap *map, int off)
 {
@@ -183,15 +180,19 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 				break;
 		}
 		if (likely(atomic_read(&map->nr_free))) {
-			do {
+			for ( ; ; ) {
 				if (!test_and_set_bit(offset, map->page)) {
 					atomic_dec(&map->nr_free);
 					set_last_pid(pid_ns, last, pid);
 					return pid;
 				}
 				offset = find_next_offset(map, offset);
+				if (offset >= BITS_PER_PAGE)
+					break;
 				pid = mk_pid(pid_ns, map, offset);
-			} while (offset < BITS_PER_PAGE && pid < pid_max);
+				if (pid >= pid_max)
+					break;
+			}
 		}
 		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
 			++map;
@@ -19,8 +19,6 @@
 #include <linux/reboot.h>
 #include <linux/export.h>
 
-#define BITS_PER_PAGE		(PAGE_SIZE*8)
-
 struct pid_cache {
 	int nr_ids;
 	char name[16];
@@ -32,7 +32,7 @@ static void handle_poweroff(int key)
 
 static struct sysrq_key_op	sysrq_poweroff_op = {
 	.handler        = handle_poweroff,
-	.help_msg       = "powerOff",
+	.help_msg       = "poweroff(o)",
 	.action_msg     = "Power Off",
 	.enable_mask	= SYSRQ_ENABLE_BOOT,
 };
@@ -43,6 +43,7 @@
 #include <linux/rculist.h>
 #include <linux/poll.h>
 #include <linux/irq_work.h>
+#include <linux/utsname.h>
 
 #include <asm/uaccess.h>
 
@@ -2849,4 +2850,65 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+static char dump_stack_arch_desc_str[128];
+
+/**
+ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
+ * @fmt: printf-style format string
+ * @...: arguments for the format string
+ *
+ * The configured string will be printed right after utsname during task
+ * dumps.  Usually used to add arch-specific system identifiers.  If an
+ * arch wants to make use of such an ID string, it should initialize this
+ * as soon as possible during boot.
+ */
+void __init dump_stack_set_arch_desc(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
+		  fmt, args);
+	va_end(args);
+}
+
+/**
+ * dump_stack_print_info - print generic debug info for dump_stack()
+ * @log_lvl: log level
+ *
+ * Arch-specific dump_stack() implementations can use this function to
+ * print out the same debug information as the generic dump_stack().
+ */
+void dump_stack_print_info(const char *log_lvl)
+{
+	printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n",
+	       log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+	       print_tainted(), init_utsname()->release,
+	       (int)strcspn(init_utsname()->version, " "),
+	       init_utsname()->version);
+
+	if (dump_stack_arch_desc_str[0] != '\0')
+		printk("%sHardware name: %s\n",
+		       log_lvl, dump_stack_arch_desc_str);
+
+	print_worker_info(log_lvl, current);
+}
+
+/**
+ * show_regs_print_info - print generic debug info for show_regs()
+ * @log_lvl: log level
+ *
+ * show_regs() implementations can use this function to print out generic
+ * debug information.
+ */
+void show_regs_print_info(const char *log_lvl)
+{
+	dump_stack_print_info(log_lvl);
+
+	printk("%stask: %p ti: %p task.ti: %p\n",
+	       log_lvl, current, current_thread_info(),
+	       task_thread_info(current));
+}
+
 #endif
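
This is the machinery that replaces the per-arch DMI lookups (see the
panic.c hunk above, which drops its dmi_get_system_info() call): an arch
records its identifier once during early boot and every task dump picks it
up. A hedged sketch of the intended use (the board name and the setup hook
it is called from are invented for illustration):

	static void __init example_board_init(void)
	{
		/* later printed by dump_stack_print_info() as
		 * "Hardware name: Example Board rev 2" */
		dump_stack_set_arch_desc("Example Board rev %d", 2);
	}

An arch-specific dump_stack() or show_regs() then only needs to call
dump_stack_print_info()/show_regs_print_info() with its log level to emit
the same header lines as the generic code.
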
@@ -24,6 +24,7 @@
 #include <linux/regset.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
+#include <linux/compat.h>
 
 
 static int ptrace_trapping_sleep_fn(void *flags)
@@ -618,6 +619,81 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
 	return error;
 }
 
+static int ptrace_peek_siginfo(struct task_struct *child,
+				unsigned long addr,
+				unsigned long data)
+{
+	struct ptrace_peeksiginfo_args arg;
+	struct sigpending *pending;
+	struct sigqueue *q;
+	int ret, i;
+
+	ret = copy_from_user(&arg, (void __user *) addr,
+				sizeof(struct ptrace_peeksiginfo_args));
+	if (ret)
+		return -EFAULT;
+
+	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
+		return -EINVAL; /* unknown flags */
+
+	if (arg.nr < 0)
+		return -EINVAL;
+
+	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
+		pending = &child->signal->shared_pending;
+	else
+		pending = &child->pending;
+
+	for (i = 0; i < arg.nr; ) {
+		siginfo_t info;
+		s32 off = arg.off + i;
+
+		spin_lock_irq(&child->sighand->siglock);
+		list_for_each_entry(q, &pending->list, list) {
+			if (!off--) {
+				copy_siginfo(&info, &q->info);
+				break;
+			}
+		}
+		spin_unlock_irq(&child->sighand->siglock);
+
+		if (off >= 0) /* beyond the end of the list */
+			break;
+
+#ifdef CONFIG_COMPAT
+		if (unlikely(is_compat_task())) {
+			compat_siginfo_t __user *uinfo = compat_ptr(data);
+
+			ret = copy_siginfo_to_user32(uinfo, &info);
+			ret |= __put_user(info.si_code, &uinfo->si_code);
+		} else
+#endif
+		{
+			siginfo_t __user *uinfo = (siginfo_t __user *) data;
+
+			ret = copy_siginfo_to_user(uinfo, &info);
+			ret |= __put_user(info.si_code, &uinfo->si_code);
+		}
+
+		if (ret) {
+			ret = -EFAULT;
+			break;
+		}
+
+		data += sizeof(siginfo_t);
+		i++;
+
+		if (signal_pending(current))
+			break;
+
+		cond_resched();
+	}
+
+	if (i > 0)
+		return i;
+
+	return ret;
+}
+
 #ifdef PTRACE_SINGLESTEP
 #define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
@@ -748,6 +824,10 @@ int ptrace_request(struct task_struct *child, long request,
 		ret = put_user(child->ptrace_message, datalp);
 		break;
 
+	case PTRACE_PEEKSIGINFO:
+		ret = ptrace_peek_siginfo(child, addr, data);
+		break;
+
 	case PTRACE_GETSIGINFO:
 		ret = ptrace_getsiginfo(child, &siginfo);
 		if (!ret)
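
For completeness, this is how the new request is meant to be driven from
userspace: a tracer reads slices of the tracee's pending-signal queue
without dequeueing anything. The sketch below is illustrative only; the
request number and argument layout are quoted from the 3.10-era UAPI
headers as I recall them, so double-check against <linux/ptrace.h>:

	#include <linux/types.h>
	#include <sys/ptrace.h>
	#include <signal.h>
	#include <stdio.h>

	#ifndef PTRACE_PEEKSIGINFO
	#define PTRACE_PEEKSIGINFO 0x4209
	struct ptrace_peeksiginfo_args {
		__u64 off;	/* queue position to start from */
		__u32 flags;	/* 0, or PTRACE_PEEKSIGINFO_SHARED */
		__s32 nr;	/* how many siginfo_t to copy */
	};
	#endif

	static long dump_pending(pid_t pid)
	{
		siginfo_t infos[8];
		struct ptrace_peeksiginfo_args args = { 0, 0, 8 };
		long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);

		for (long i = 0; i < n; i++)
			printf("pending signal %d (si_code %d)\n",
			       infos[i].si_signo, infos[i].si_code);
		return n;	/* count copied, 0 if queue empty, -1 on error */
	}
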
@@ -97,7 +97,8 @@ void subtract_range(struct range *range, int az, u64 start, u64 end)
 				range[i].end = range[j].end;
 				range[i].start = end;
 			} else {
-				printk(KERN_ERR "run of slot in ranges\n");
+				pr_err("%s: run out of slot in ranges\n",
+				       __func__);
 			}
 			range[j].end = start;
 			continue;
@@ -588,7 +588,7 @@ struct rchan *relay_open(const char *base_filename,
 	chan->version = RELAYFS_CHANNEL_VERSION;
 	chan->n_subbufs = n_subbufs;
 	chan->subbuf_size = subbuf_size;
-	chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
+	chan->alloc_size = PAGE_ALIGN(subbuf_size * n_subbufs);
 	chan->parent = parent;
 	chan->private_data = private_data;
 	if (base_filename) {
@@ -1099,8 +1099,7 @@ static size_t relay_file_read_end_pos(struct rchan_buf *buf,
 static int subbuf_read_actor(size_t read_start,
 			     struct rchan_buf *buf,
 			     size_t avail,
-			     read_descriptor_t *desc,
-			     read_actor_t actor)
+			     read_descriptor_t *desc)
 {
 	void *from;
 	int ret = 0;
@@ -1121,15 +1120,13 @@ static int subbuf_read_actor(size_t read_start,
 typedef int (*subbuf_actor_t) (size_t read_start,
 			       struct rchan_buf *buf,
 			       size_t avail,
-			       read_descriptor_t *desc,
-			       read_actor_t actor);
+			       read_descriptor_t *desc);
 
 /*
  *	relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
  */
 static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
 					subbuf_actor_t subbuf_actor,
-					read_actor_t actor,
 					read_descriptor_t *desc)
 {
 	struct rchan_buf *buf = filp->private_data;
@@ -1150,7 +1147,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
 			break;
 
 		avail = min(desc->count, avail);
-		ret = subbuf_actor(read_start, buf, avail, desc, actor);
+		ret = subbuf_actor(read_start, buf, avail, desc);
 		if (desc->error < 0)
 			break;
 
@@ -1174,8 +1171,7 @@ static ssize_t relay_file_read(struct file *filp,
 	desc.count = count;
 	desc.arg.buf = buffer;
 	desc.error = 0;
-	return relay_file_read_subbufs(filp, ppos, subbuf_read_actor,
-				       NULL, &desc);
+	return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, &desc);
 }
 
 static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
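
Per the commit subjects in this merge, FIX_SIZE was relay's private "round
up to a whole number of pages" helper (first moved into relay.c, then
retired here); PAGE_ALIGN from the common headers expresses the same
rounding. As a worked example, with 4 KiB pages a channel of 3 sub-buffers
of 3000 bytes each allocates PAGE_ALIGN(3 * 3000) = PAGE_ALIGN(9000) =
12288 bytes, i.e. three pages.
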
@@ -4586,6 +4586,7 @@ void sched_show_task(struct task_struct *p)
 		task_pid_nr(p), ppid,
 		(unsigned long)task_thread_info(p)->flags);
 
+	print_worker_info(KERN_INFO, p);
 	show_stack(p, NULL);
 }
 
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(up);
 struct semaphore_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	int up;
+	bool up;
 };
 
 /*
@@ -209,12 +209,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 
 		list_add_tail(&waiter.list, &sem->wait_list);
 		waiter.task = task;
-		waiter.up = 0;
+		waiter.up = false;
 
 		for (;;) {
 			if (signal_pending_state(state, task))
 				goto interrupted;
-			if (timeout <= 0)
+			if (unlikely(timeout <= 0))
 				goto timed_out;
 			__set_task_state(task, state);
 			raw_spin_unlock_irq(&sem->lock);
@@ -258,6 +258,6 @@ static noinline void __sched __up(struct semaphore *sem)
 	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
 						struct semaphore_waiter, list);
 	list_del(&waiter->list);
-	waiter->up = 1;
+	waiter->up = true;
 	wake_up_process(waiter->task);
 }
@@ -854,12 +854,14 @@ static void ptrace_trap_notify(struct task_struct *t)
  * Returns true if the signal should be actually delivered, otherwise
  * it should be dropped.
  */
-static int prepare_signal(int sig, struct task_struct *p, bool force)
+static bool prepare_signal(int sig, struct task_struct *p, bool force)
 {
 	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
 
-	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
+	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
+		if (signal->flags & SIGNAL_GROUP_COREDUMP)
+			return sig == SIGKILL;
 		/*
 		 * The process is in the middle of dying, nothing to do.
 		 */
@@ -1160,8 +1162,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 static void print_fatal_signal(int signr)
 {
 	struct pt_regs *regs = signal_pt_regs();
-	printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
-		current->comm, task_pid_nr(current), signr);
+	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
 	printk(KERN_INFO "code at %08lx: ", regs->ip);
kernel/smp.c (91 lines changed)
@@ -100,16 +100,16 @@ void __init call_function_init(void)
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void csd_lock_wait(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *csd)
 {
-	while (data->flags & CSD_FLAG_LOCK)
+	while (csd->flags & CSD_FLAG_LOCK)
 		cpu_relax();
 }
 
-static void csd_lock(struct call_single_data *data)
+static void csd_lock(struct call_single_data *csd)
 {
-	csd_lock_wait(data);
-	data->flags = CSD_FLAG_LOCK;
+	csd_lock_wait(csd);
+	csd->flags |= CSD_FLAG_LOCK;
 
 	/*
 	 * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@ static void csd_lock(struct call_single_data *data)
 	smp_mb();
 }
 
-static void csd_unlock(struct call_single_data *data)
+static void csd_unlock(struct call_single_data *csd)
 {
-	WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 
 	/*
 	 * ensure we're all done before releasing data:
 	 */
 	smp_mb();
 
-	data->flags &= ~CSD_FLAG_LOCK;
+	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
 /*
@@ -137,7 +137,7 @@ static void csd_unlock(struct call_single_data *data)
  * ->func, ->info, and ->flags set.
  */
 static
-void generic_exec_single(int cpu, struct call_single_data *data, int wait)
+void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 	unsigned long flags;
@@ -145,7 +145,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 
 	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
-	list_add_tail(&data->list, &dst->list);
+	list_add_tail(&csd->list, &dst->list);
 	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
@@ -163,7 +163,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
-		csd_lock_wait(data);
+		csd_lock_wait(csd);
 }
 
 /*
@@ -173,7 +173,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 void generic_smp_call_function_single_interrupt(void)
 {
 	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	unsigned int data_flags;
 	LIST_HEAD(list);
 
 	/*
@@ -186,25 +185,26 @@ void generic_smp_call_function_single_interrupt(void)
 	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
-		struct call_single_data *data;
+		struct call_single_data *csd;
+		unsigned int csd_flags;
 
-		data = list_entry(list.next, struct call_single_data, list);
-		list_del(&data->list);
+		csd = list_entry(list.next, struct call_single_data, list);
+		list_del(&csd->list);
 
 		/*
-		 * 'data' can be invalid after this call if flags == 0
+		 * 'csd' can be invalid after this call if flags == 0
 		 * (when called through generic_exec_single()),
 		 * so save them away before making the call:
 		 */
-		data_flags = data->flags;
+		csd_flags = csd->flags;
 
-		data->func(data->info);
+		csd->func(csd->info);
 
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single():
 		 */
-		if (data_flags & CSD_FLAG_LOCK)
-			csd_unlock(data);
+		if (csd_flags & CSD_FLAG_LOCK)
+			csd_unlock(csd);
 	}
 }
 
@@ -249,16 +249,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 		local_irq_restore(flags);
 	} else {
 		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *data = &d;
+			struct call_single_data *csd = &d;
 
 			if (!wait)
-				data = &__get_cpu_var(csd_data);
+				csd = &__get_cpu_var(csd_data);
 
-			csd_lock(data);
+			csd_lock(csd);
 
-			data->func = func;
-			data->info = info;
-			generic_exec_single(cpu, data, wait);
+			csd->func = func;
+			csd->info = info;
+			generic_exec_single(cpu, csd, wait);
 		} else {
 			err = -ENXIO;	/* CPU not online */
 		}
@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
-void __smp_call_function_single(int cpu, struct call_single_data *data,
+void __smp_call_function_single(int cpu, struct call_single_data *csd,
 				int wait)
 {
 	unsigned int this_cpu;
@@ -343,11 +343,11 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
-		data->func(data->info);
+		csd->func(csd->info);
 		local_irq_restore(flags);
 	} else {
-		csd_lock(data);
-		generic_exec_single(cpu, data, wait);
+		csd_lock(csd);
+		generic_exec_single(cpu, csd, wait);
 	}
 	put_cpu();
 }
@@ -369,7 +369,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait)
 {
-	struct call_function_data *data;
+	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
@@ -401,24 +401,24 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
-	data = &__get_cpu_var(cfd_data);
+	cfd = &__get_cpu_var(cfd_data);
 
-	cpumask_and(data->cpumask, mask, cpu_online_mask);
-	cpumask_clear_cpu(this_cpu, data->cpumask);
+	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
+	cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
-	if (unlikely(!cpumask_weight(data->cpumask)))
+	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
 	/*
-	 * After we put an entry into the list, data->cpumask
-	 * may be cleared again when another CPU sends another IPI for
-	 * a SMP function call, so data->cpumask will be zero.
+	 * After we put an entry into the list, cfd->cpumask may be cleared
+	 * again when another CPU sends another IPI for a SMP function call, so
+	 * cfd->cpumask will be zero.
	 */
-	cpumask_copy(data->cpumask_ipi, data->cpumask);
+	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
 
-	for_each_cpu(cpu, data->cpumask) {
-		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+	for_each_cpu(cpu, cfd->cpumask) {
+		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 		struct call_single_queue *dst =
 					&per_cpu(call_single_queue, cpu);
 		unsigned long flags;
@@ -433,12 +433,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(data->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
 	if (wait) {
-		for_each_cpu(cpu, data->cpumask) {
-			struct call_single_data *csd =
-					per_cpu_ptr(data->csd, cpu);
+		for_each_cpu(cpu, cfd->cpumask) {
+			struct call_single_data *csd;
+
+			csd = per_cpu_ptr(cfd->csd, cpu);
 			csd_lock_wait(csd);
 		}
 	}
@@ -620,8 +620,7 @@ static void remote_softirq_receive(void *data)
 	unsigned long flags;
 	int softirq;
 
-	softirq = cp->priv;
-
+	softirq = *(int *)cp->info;
 	local_irq_save(flags);
 	__local_trigger(cp, softirq);
 	local_irq_restore(flags);
@@ -631,9 +630,8 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
 {
 	if (cpu_online(cpu)) {
 		cp->func = remote_softirq_receive;
-		cp->info = cp;
+		cp->info = &softirq;
 		cp->flags = 0;
-		cp->priv = softirq;
 
 		__smp_call_function_single(cpu, cp, 0);
 		return 0;
kernel/sys.c (221 lines changed)
@@ -49,6 +49,11 @@
 #include <linux/user_namespace.h>
 #include <linux/binfmts.h>
 
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+#include <linux/uidgid.h>
+#include <linux/cred.h>
+
 #include <linux/kmsg_dump.h>
 /* Move somewhere else to avoid recompiling? */
 #include <generated/utsrelease.h>
@@ -1044,6 +1049,67 @@ change_okay:
 	return old_fsgid;
 }
 
+/**
+ * sys_getpid - return the thread group id of the current process
+ *
+ * Note, despite the name, this returns the tgid not the pid.  The tgid and
+ * the pid are identical unless CLONE_THREAD was specified on clone() in
+ * which case the tgid is the same in all threads of the same group.
+ *
+ * This is SMP safe as current->tgid does not change.
+ */
+SYSCALL_DEFINE0(getpid)
+{
+	return task_tgid_vnr(current);
+}
+
+/* Thread ID - the internal kernel "pid" */
+SYSCALL_DEFINE0(gettid)
+{
+	return task_pid_vnr(current);
+}
+
+/*
+ * Accessing ->real_parent is not SMP-safe, it could
+ * change from under us. However, we can use a stale
+ * value of ->real_parent under rcu_read_lock(), see
+ * release_task()->call_rcu(delayed_put_task_struct).
+ */
+SYSCALL_DEFINE0(getppid)
+{
+	int pid;
+
+	rcu_read_lock();
+	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
+	rcu_read_unlock();
+
+	return pid;
+}
+
+SYSCALL_DEFINE0(getuid)
+{
+	/* Only we change this so SMP safe */
+	return from_kuid_munged(current_user_ns(), current_uid());
+}
+
+SYSCALL_DEFINE0(geteuid)
+{
+	/* Only we change this so SMP safe */
+	return from_kuid_munged(current_user_ns(), current_euid());
+}
+
+SYSCALL_DEFINE0(getgid)
+{
+	/* Only we change this so SMP safe */
+	return from_kgid_munged(current_user_ns(), current_gid());
+}
+
+SYSCALL_DEFINE0(getegid)
+{
+	/* Only we change this so SMP safe */
+	return from_kgid_munged(current_user_ns(), current_egid());
+}
+
 void do_sys_times(struct tms *tms)
 {
 	cputime_t tgutime, tgstime, cutime, cstime;
@@ -1791,7 +1857,6 @@ SYSCALL_DEFINE1(umask, int, mask)
 	return mask;
 }
 
-#ifdef CONFIG_CHECKPOINT_RESTORE
 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 {
 	struct fd exe;
@@ -1985,17 +2050,12 @@ out:
 	return error;
 }
 
+#ifdef CONFIG_CHECKPOINT_RESTORE
 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
 {
 	return put_user(me->clear_child_tid, tid_addr);
 }
-
-#else /* CONFIG_CHECKPOINT_RESTORE */
-static int prctl_set_mm(int opt, unsigned long addr,
-			unsigned long arg4, unsigned long arg5)
-{
-	return -EINVAL;
-}
+#else
 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
 {
 	return -EINVAL;
@@ -2245,3 +2305,148 @@ int orderly_poweroff(bool force)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(orderly_poweroff);
+
+/**
+ * do_sysinfo - fill in sysinfo struct
+ * @info: pointer to buffer to fill
+ */
+static int do_sysinfo(struct sysinfo *info)
+{
+	unsigned long mem_total, sav_total;
+	unsigned int mem_unit, bitcount;
+	struct timespec tp;
+
+	memset(info, 0, sizeof(struct sysinfo));
+
+	ktime_get_ts(&tp);
+	monotonic_to_bootbased(&tp);
+	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+
+	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
+
+	info->procs = nr_threads;
+
+	si_meminfo(info);
+	si_swapinfo(info);
+
+	/*
+	 * If the sum of all the available memory (i.e. ram + swap)
+	 * is less than can be stored in a 32 bit unsigned long then
+	 * we can be binary compatible with 2.2.x kernels.  If not,
+	 * well, in that case 2.2.x was broken anyways...
+	 *
+	 *  -Erik Andersen <andersee@debian.org>
+	 */
+
+	mem_total = info->totalram + info->totalswap;
+	if (mem_total < info->totalram || mem_total < info->totalswap)
+		goto out;
+	bitcount = 0;
+	mem_unit = info->mem_unit;
+	while (mem_unit > 1) {
+		bitcount++;
+		mem_unit >>= 1;
+		sav_total = mem_total;
+		mem_total <<= 1;
+		if (mem_total < sav_total)
+			goto out;
+	}
+
+	/*
+	 * If mem_total did not overflow, multiply all memory values by
+	 * info->mem_unit and set it to 1.  This leaves things compatible
+	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
+	 * kernels...
+	 */
+
+	info->mem_unit = 1;
+	info->totalram <<= bitcount;
+	info->freeram <<= bitcount;
+	info->sharedram <<= bitcount;
+	info->bufferram <<= bitcount;
+	info->totalswap <<= bitcount;
+	info->freeswap <<= bitcount;
+	info->totalhigh <<= bitcount;
+	info->freehigh <<= bitcount;
+
+out:
+	return 0;
+}
+
+SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
+{
+	struct sysinfo val;
+
+	do_sysinfo(&val);
+
+	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
+		return -EFAULT;
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+struct compat_sysinfo {
+	s32 uptime;
+	u32 loads[3];
+	u32 totalram;
+	u32 freeram;
+	u32 sharedram;
+	u32 bufferram;
+	u32 totalswap;
+	u32 freeswap;
+	u16 procs;
+	u16 pad;
+	u32 totalhigh;
+	u32 freehigh;
+	u32 mem_unit;
+	char _f[20-2*sizeof(u32)-sizeof(int)];
+};
+
+COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
+{
+	struct sysinfo s;
+
+	do_sysinfo(&s);
+
+	/* Check to see if any memory value is too large for 32-bit and scale
+	 *  down if needed
+	 */
+	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
+		int bitcount = 0;
+
+		while (s.mem_unit < PAGE_SIZE) {
+			s.mem_unit <<= 1;
+			bitcount++;
+		}
+
+		s.totalram >>= bitcount;
+		s.freeram >>= bitcount;
+		s.sharedram >>= bitcount;
+		s.bufferram >>= bitcount;
+		s.totalswap >>= bitcount;
+		s.freeswap >>= bitcount;
+		s.totalhigh >>= bitcount;
+		s.freehigh >>= bitcount;
+	}
+
+	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
+	    __put_user(s.uptime, &info->uptime) ||
+	    __put_user(s.loads[0], &info->loads[0]) ||
+	    __put_user(s.loads[1], &info->loads[1]) ||
+	    __put_user(s.loads[2], &info->loads[2]) ||
+	    __put_user(s.totalram, &info->totalram) ||
+	    __put_user(s.freeram, &info->freeram) ||
+	    __put_user(s.sharedram, &info->sharedram) ||
+	    __put_user(s.bufferram, &info->bufferram) ||
+	    __put_user(s.totalswap, &info->totalswap) ||
+	    __put_user(s.freeswap, &info->freeswap) ||
+	    __put_user(s.procs, &info->procs) ||
+	    __put_user(s.totalhigh, &info->totalhigh) ||
+	    __put_user(s.freehigh, &info->freehigh) ||
+	    __put_user(s.mem_unit, &info->mem_unit))
+		return -EFAULT;
+
+	return 0;
+}
+#endif /* CONFIG_COMPAT */
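
A worked example of the compat scaling above, assuming 4 KiB pages: after
do_sysinfo(), s.mem_unit is 1 and the counters are in bytes, so a 16 GiB
machine has s.totalram = 1ULL << 34 and the (s.totalram >> 32) test fires.
The loop doubles mem_unit until it reaches PAGE_SIZE (4096 = 1 << 12), so
bitcount ends up 12, and s.totalram >>= 12 leaves 1 << 22 units of 4096
bytes: the same 16 GiB, now comfortably inside the u32 compat field.
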
kernel/timer.c (143 lines changed)
@@ -1,7 +1,7 @@
 /*
  *  linux/kernel/timer.c
  *
- *  Kernel internal timers, basic process system calls
+ *  Kernel internal timers
  *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -1395,61 +1396,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
 
 #endif
 
-/**
- * sys_getpid - return the thread group id of the current process
- *
- * Note, despite the name, this returns the tgid not the pid.  The tgid and
- * the pid are identical unless CLONE_THREAD was specified on clone() in
- * which case the tgid is the same in all threads of the same group.
- *
- * This is SMP safe as current->tgid does not change.
- */
-SYSCALL_DEFINE0(getpid)
-{
-	return task_tgid_vnr(current);
-}
-
-/*
- * Accessing ->real_parent is not SMP-safe, it could
- * change from under us. However, we can use a stale
- * value of ->real_parent under rcu_read_lock(), see
- * release_task()->call_rcu(delayed_put_task_struct).
- */
-SYSCALL_DEFINE0(getppid)
-{
-	int pid;
-
-	rcu_read_lock();
-	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
-	rcu_read_unlock();
-
-	return pid;
-}
-
-SYSCALL_DEFINE0(getuid)
-{
-	/* Only we change this so SMP safe */
-	return from_kuid_munged(current_user_ns(), current_uid());
-}
-
-SYSCALL_DEFINE0(geteuid)
-{
-	/* Only we change this so SMP safe */
-	return from_kuid_munged(current_user_ns(), current_euid());
-}
-
-SYSCALL_DEFINE0(getgid)
-{
-	/* Only we change this so SMP safe */
-	return from_kgid_munged(current_user_ns(), current_gid());
-}
-
-SYSCALL_DEFINE0(getegid)
-{
-	/* Only we change this so SMP safe */
-	return from_kgid_munged(current_user_ns(), current_egid());
-}
-
 static void process_timeout(unsigned long __data)
 {
 	wake_up_process((struct task_struct *)__data);
@@ -1557,91 +1503,6 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
-/* Thread ID - the internal kernel "pid" */
-SYSCALL_DEFINE0(gettid)
-{
-	return task_pid_vnr(current);
-}
-
-/**
- * do_sysinfo - fill in sysinfo struct
- * @info: pointer to buffer to fill
- */
-int do_sysinfo(struct sysinfo *info)
-{
-	unsigned long mem_total, sav_total;
-	unsigned int mem_unit, bitcount;
-	struct timespec tp;
-
-	memset(info, 0, sizeof(struct sysinfo));
-
-	ktime_get_ts(&tp);
-	monotonic_to_bootbased(&tp);
-	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
-
-	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
-
-	info->procs = nr_threads;
-
-	si_meminfo(info);
-	si_swapinfo(info);
-
-	/*
-	 * If the sum of all the available memory (i.e. ram + swap)
-	 * is less than can be stored in a 32 bit unsigned long then
-	 * we can be binary compatible with 2.2.x kernels.  If not,
-	 * well, in that case 2.2.x was broken anyways...
-	 *
-	 *  -Erik Andersen <andersee@debian.org>
-	 */
-
-	mem_total = info->totalram + info->totalswap;
-	if (mem_total < info->totalram || mem_total < info->totalswap)
-		goto out;
-	bitcount = 0;
-	mem_unit = info->mem_unit;
-	while (mem_unit > 1) {
-		bitcount++;
-		mem_unit >>= 1;
-		sav_total = mem_total;
-		mem_total <<= 1;
-		if (mem_total < sav_total)
-			goto out;
-	}
-
-	/*
-	 * If mem_total did not overflow, multiply all memory values by
-	 * info->mem_unit and set it to 1.  This leaves things compatible
-	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
-	 * kernels...
-	 */
-
-	info->mem_unit = 1;
-	info->totalram <<= bitcount;
-	info->freeram <<= bitcount;
-	info->sharedram <<= bitcount;
-	info->bufferram <<= bitcount;
-	info->totalswap <<= bitcount;
-	info->freeswap <<= bitcount;
-	info->totalhigh <<= bitcount;
-	info->freehigh <<= bitcount;
-
-out:
-	return 0;
-}
-
-SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
-{
-	struct sysinfo val;
-
-	do_sysinfo(&val);
-
-	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
-		return -EFAULT;
-
-	return 0;
-}
-
 static int __cpuinit init_timers_cpu(int cpu)
 {
 	int j;
@@ -46,6 +46,7 @@
 #include <linux/rculist.h>
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
+#include <linux/uaccess.h>
 
 #include "workqueue_internal.h"
 
@@ -2197,6 +2198,7 @@ __acquires(&pool->lock)
 	worker->current_work = NULL;
 	worker->current_func = NULL;
 	worker->current_pwq = NULL;
+	worker->desc_valid = false;
 	pwq_dec_nr_in_flight(pwq, work_color);
 }
 
@@ -4365,6 +4367,83 @@ unsigned int work_busy(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(work_busy);
 
+/**
+ * set_worker_desc - set description for the current work item
+ * @fmt: printf-style format string
+ * @...: arguments for the format string
+ *
+ * This function can be called by a running work function to describe what
+ * the work item is about.  If the worker task gets dumped, this
+ * information will be printed out together to help debugging.  The
+ * description can be at most WORKER_DESC_LEN including the trailing '\0'.
+ */
+void set_worker_desc(const char *fmt, ...)
+{
+	struct worker *worker = current_wq_worker();
+	va_list args;
+
+	if (worker) {
+		va_start(args, fmt);
+		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
+		va_end(args);
+		worker->desc_valid = true;
+	}
+}
+
+/**
+ * print_worker_info - print out worker information and description
+ * @log_lvl: the log level to use when printing
+ * @task: target task
+ *
+ * If @task is a worker and currently executing a work item, print out the
+ * name of the workqueue being serviced and worker description set with
+ * set_worker_desc() by the currently executing work item.
+ *
+ * This function can be safely called on any task as long as the
+ * task_struct itself is accessible.  While safe, this function isn't
+ * synchronized and may print out mixups or garbages of limited length.
+ */
+void print_worker_info(const char *log_lvl, struct task_struct *task)
+{
+	work_func_t *fn = NULL;
+	char name[WQ_NAME_LEN] = { };
+	char desc[WORKER_DESC_LEN] = { };
+	struct pool_workqueue *pwq = NULL;
+	struct workqueue_struct *wq = NULL;
+	bool desc_valid = false;
+	struct worker *worker;
+
+	if (!(task->flags & PF_WQ_WORKER))
+		return;
+
+	/*
+	 * This function is called without any synchronization and @task
+	 * could be in any state.  Be careful with dereferences.
+	 */
+	worker = probe_kthread_data(task);
+
+	/*
+	 * Carefully copy the associated workqueue's workfn and name.  Keep
+	 * the original last '\0' in case the original contains garbage.
+	 */
+	probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
+	probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
+	probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
+	probe_kernel_read(name, wq->name, sizeof(name) - 1);
+
+	/* copy worker description */
+	probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
+	if (desc_valid)
+		probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
+
+	if (fn || name[0] || desc[0]) {
+		printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
+		if (desc[0])
+			pr_cont(" (%s)", desc);
+		pr_cont("\n");
+	}
+}
+
 /*
  * CPU hotplug.
  *
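
Work functions opt in by describing themselves while they run; the
description is then glued to the "Workqueue:" line that print_worker_info()
emits (and which sched_show_task() and dump_stack_print_info() above now
print for every task dump). A minimal sketch of a caller (the work function
and device name are invented; the in-tree user this was written for is,
as far as I recall, the writeback worker):

	static void example_work_fn(struct work_struct *work)
	{
		/* at most WORKER_DESC_LEN bytes, shown in task dumps as
		 * "Workqueue: <wq name> example_work_fn (flush-sda1)" */
		set_worker_desc("flush-%s", "sda1");

		/* ... do the actual work ... */
	}
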
@@ -29,15 +29,25 @@ struct worker {
 	struct work_struct	*current_work;	/* L: work being processed */
 	work_func_t		current_func;	/* L: current_work's fn */
 	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
+	bool			desc_valid;	/* ->desc is valid */
 	struct list_head	scheduled;	/* L: scheduled works */
+
+	/* 64 bytes boundary on 64bit, 32 on 32bit */
+
 	struct task_struct	*task;		/* I: worker task */
 	struct worker_pool	*pool;		/* I: the associated pool */
 						/* L: for rescuers */
-	/* 64 bytes boundary on 64bit, 32 on 32bit */
+
 	unsigned long		last_active;	/* L: last active timestamp */
 	unsigned int		flags;		/* X: flags */
 	int			id;		/* I: worker id */
 
+	/*
+	 * Opaque string set with work_set_desc().  Printed out with task
+	 * dump for debugging - WARN, BUG, panic or sysrq.
+	 */
+	char			desc[WORKER_DESC_LEN];
+
 	/* used only by rescuers to point to the target workqueue */
 	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
 };