Merge branch 'sched/urgent' into sched/core

Merge all pending fixes and refresh the tree, before applying new changes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar
2015-01-30 19:28:36 +01:00
Current commit 3847b27224
901 changed files with 9403 additions and 6999 deletions

View file

@@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
* This function doesn't consume an skb as might be expected since it has to
* copy it anyways.
*/
-static void kauditd_send_multicast_skb(struct sk_buff *skb)
+static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *copy;
struct audit_net *aunet = net_generic(&init_net, audit_net_id);
@@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
* no reason for new multicast clients to continue with this
* non-compliance.
*/
-copy = skb_copy(skb, GFP_KERNEL);
+copy = skb_copy(skb, gfp_mask);
if (!copy)
return;
-nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
}
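The hard-coded GFP_KERNEL was the bug here: audit_log_end() can run in contexts that must not sleep, so the allocation mode has to travel with the buffer (ab->gfp_mask) instead of being chosen inside the copy helper. A minimal userspace sketch of the same plumbing pattern — all names here are illustrative, not kernel API:

    #include <stdlib.h>
    #include <string.h>

    /* gfp_like_t stands in for gfp_t; MAY_BLOCK for the GFP_KERNEL
     * vs. GFP_ATOMIC distinction. */
    typedef unsigned int gfp_like_t;
    #define MAY_BLOCK 0x1u

    /* The copy helper no longer picks an allocation mode itself; it honors
     * whatever the creator of the buffer recorded, as the diff does with
     * ab->gfp_mask flowing into skb_copy()/nlmsg_multicast(). */
    static void *copy_payload(const void *src, size_t len, gfp_like_t gfp)
    {
        (void)gfp;               /* a real allocator would branch on this flag */
        void *dst = malloc(len);
        if (dst)
            memcpy(dst, src, len);
        return dst;
    }

    int main(void)
    {
        char record[] = "audit record";
        void *copy = copy_payload(record, sizeof(record), MAY_BLOCK);
        free(copy);
        return 0;
    }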
/*
@@ -1100,7 +1100,7 @@ static void audit_receive(struct sk_buff *skb)
}
/* Run custom bind function on netlink socket group connect or bind requests. */
-static int audit_bind(int group)
+static int audit_bind(struct net *net, int group)
{
if (!capable(CAP_AUDIT_READ))
return -EPERM;
@@ -1940,7 +1940,7 @@ void audit_log_end(struct audit_buffer *ab)
struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
nlh->nlmsg_len = ab->skb->len;
-kauditd_send_multicast_skb(ab->skb);
+kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
/*
* The original kaudit unicast socket sends up messages with

View file

@@ -442,19 +442,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
f->val = 0;
}
-if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
-struct pid *pid;
-rcu_read_lock();
-pid = find_vpid(f->val);
-if (!pid) {
-rcu_read_unlock();
-err = -ESRCH;
-goto exit_free;
-}
-f->val = pid_nr(pid);
-rcu_read_unlock();
+entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
}
err = audit_field_valid(entry, f);
@@ -630,6 +618,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
data->buflen += data->values[i] =
audit_pack_string(&bufp, krule->filterkey);
break;
+case AUDIT_LOGINUID_SET:
+if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
+data->fields[i] = AUDIT_LOGINUID;
+data->values[i] = AUDIT_UID_UNSET;
+break;
+}
+/* fallthrough if set */
default:
data->values[i] = f->val;
}
@@ -646,6 +641,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
int i;
if (a->flags != b->flags ||
+a->pflags != b->pflags ||
a->listnr != b->listnr ||
a->action != b->action ||
a->field_count != b->field_count)
@@ -764,6 +760,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
new = &entry->rule;
new->vers_ops = old->vers_ops;
new->flags = old->flags;
+new->pflags = old->pflags;
new->listnr = old->listnr;
new->action = old->action;
for (i = 0; i < AUDIT_BITMASK_SIZE; i++)

View file

@@ -72,6 +72,8 @@
#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
#include "audit.h"
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
}
list_for_each_entry_reverse(n, &context->names_list, list) {
/* does the name pointer match? */
-if (!n->name || n->name->name != name->name)
+if (!n->name || strcmp(n->name->name, name->name))
continue;
/* match the correct record type */
@@ -1877,12 +1878,48 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
}
out_alloc:
-/* unable to find the name from a previous getname(). Allocate a new
-* anonymous entry.
-*/
-n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
+/* unable to find an entry with both a matching name and type */
+n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
if (!n)
return;
+/* unfortunately, while we may have a path name to record with the
+* inode, we can't always rely on the string lasting until the end of
+* the syscall so we need to create our own copy, it may fail due to
+* memory allocation issues, but we do our best */
+if (name) {
+/* we can't use getname_kernel() due to size limits */
+size_t len = strlen(name->name) + 1;
+struct filename *new = __getname();
+if (unlikely(!new))
+goto out;
+if (len <= (PATH_MAX - sizeof(*new))) {
+new->name = (char *)(new) + sizeof(*new);
+new->separate = false;
+} else if (len <= PATH_MAX) {
+/* this looks odd, but is due to final_putname() */
+struct filename *new2;
+new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+if (unlikely(!new2)) {
+__putname(new);
+goto out;
+}
+new2->name = (char *)new;
+new2->separate = true;
+new = new2;
+} else {
+/* we should never get here, but let's be safe */
+__putname(new);
+goto out;
+}
+strlcpy((char *)new->name, name->name, len);
+new->uptr = NULL;
+new->aname = n;
+n->name = new;
+n->name_put = true;
+}
+out:
if (parent) {
n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
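The copy logic above packs the duplicated path into the same buffer as the struct filename header when it fits (new->name points just past the header), and only near-PATH_MAX names get a separate header block. A simplified userspace sketch of the inline single-allocation case, assuming a plain malloc in place of __getname():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy header; the kernel's struct filename has more fields. */
    struct fname {
        const char *name;   /* points just past the header */
    };

    static struct fname *fname_copy(const char *src)
    {
        size_t len = strlen(src) + 1;
        struct fname *f = malloc(sizeof(*f) + len);
        if (!f)
            return NULL;
        f->name = (char *)(f + 1);           /* string lives inline after the header */
        memcpy((char *)(f + 1), src, len);
        return f;
    }

    int main(void)
    {
        struct fname *f = fname_copy("/etc/passwd");
        if (f) {
            puts(f->name);
            free(f);                          /* one free releases header + string */
        }
        return 0;
    }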

View file

@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
-module_free(NULL, hdr);
+module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */

View file

@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
*
* And don't kill the default root.
*/
-if (css_has_online_children(&root->cgrp.self) ||
+if (!list_empty(&root->cgrp.self.children) ||
root == &cgrp_dfl_root)
cgroup_put(&root->cgrp);
else

View file

@@ -27,6 +27,9 @@
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
+#define pr_fmt(fmt) "KGDB: " fmt
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
return err;
err = kgdb_arch_remove_breakpoint(&tmp);
if (err)
printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
"memory destroyed at: %lx", addr);
pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
addr);
return err;
}
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
if (error) {
ret = error;
printk(KERN_INFO "KGDB: BP install failed: %lx",
kgdb_break[i].bpt_addr);
pr_info("BP install failed: %lx\n",
kgdb_break[i].bpt_addr);
continue;
}
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
continue;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error) {
printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
kgdb_break[i].bpt_addr);
pr_info("BP remove failed: %lx\n",
kgdb_break[i].bpt_addr);
ret = error;
}
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
goto setundefined;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error)
printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
pr_err("breakpoint remove failed: %lx\n",
kgdb_break[i].bpt_addr);
setundefined:
kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
if (print_wait) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
pr_crit("waiting... or $3#33 for KDB\n");
#else
printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
pr_crit("Waiting for remote debugger\n");
#endif
}
return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
exception_level = 0;
kgdb_skipexception(ks->ex_vector, ks->linux_regs);
dbg_activate_sw_breakpoints();
printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
addr);
pr_crit("re-enter error: breakpoint removed %lx\n", addr);
WARN_ON_ONCE(1);
return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
panic("Recursive entry to debugger");
}
printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
/* Allow kdb to debug itself one level */
return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
int cpu;
int trace_on = 0;
int online_cpus = num_online_cpus();
+u64 time_left;
kgdb_info[ks->cpu].enter_kgdb++;
kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
-while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
-atomic_read(&slaves_in_kgdb)) != online_cpus)
+time_left = loops_per_jiffy * HZ;
+while (kgdb_do_roundup && --time_left &&
+(atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+online_cpus)
cpu_relax();
+if (!time_left)
+pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
/*
* At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
pr_crit("ERROR: No KGDB I/O module available\n");
return;
}
if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
pr_crit("KGDB or $3#33 for KDB\n");
#else
printk(KERN_CRIT "Entering KGDB\n");
pr_crit("Entering KGDB\n");
#endif
}
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
{
kgdb_break_asap = 0;
printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
pr_crit("Waiting for connection from remote gdb...\n");
kgdb_breakpoint();
}
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
if (dbg_io_ops) {
spin_unlock(&kgdb_registration_lock);
printk(KERN_ERR "kgdb: Another I/O driver is already "
"registered with KGDB.\n");
pr_err("Another I/O driver is already registered with KGDB\n");
return -EBUSY;
}
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
new_dbg_io_ops->name);
pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
/* Arm KGDB now. */
kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
-printk(KERN_INFO
-"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+pr_info("Unregistered I/O driver %s, debugger disabled\n",
old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

View file

@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
bp->bp_free = 1;
kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
"Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
"Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
"Set/Display breakpoints", 0,
KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
"Display breakpoints", 0,
KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
"[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("bc", kdb_bc, "<bpnum>",
"Clear Breakpoint", 0, KDB_REPEAT_NONE);
kdb_register_repeat("be", kdb_bc, "<bpnum>",
"Enable Breakpoint", 0, KDB_REPEAT_NONE);
kdb_register_repeat("bd", kdb_bc, "<bpnum>",
"Disable Breakpoint", 0, KDB_REPEAT_NONE);
kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
"[datar [length]|dataw [length]] Set hw brk", 0,
KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
kdb_register_flags("bc", kdb_bc, "<bpnum>",
"Clear Breakpoint", 0,
KDB_ENABLE_FLOW_CTRL);
kdb_register_flags("be", kdb_bc, "<bpnum>",
"Enable Breakpoint", 0,
KDB_ENABLE_FLOW_CTRL);
kdb_register_flags("bd", kdb_bc, "<bpnum>",
"Disable Breakpoint", 0,
KDB_ENABLE_FLOW_CTRL);
kdb_register_repeat("ss", kdb_ss, "",
"Single Step", 1, KDB_REPEAT_NO_ARGS);
kdb_register_flags("ss", kdb_ss, "",
"Single Step", 1,
KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/

View file

@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
ks->pass_exception = 1;
KDB_FLAG_SET(CATASTROPHIC);
}
+/* set CATASTROPHIC if the system contains unresponsive processors */
+for_each_online_cpu(i)
+if (!kgdb_info[i].enter_kgdb)
+KDB_FLAG_SET(CATASTROPHIC);
if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
KDB_STATE_CLEAR(SSBPT);
KDB_STATE_CLEAR(DOING_SS);

View file

@@ -12,6 +12,7 @@
*/
#include <linux/ctype.h>
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
@@ -42,6 +44,12 @@
#include <linux/slab.h>
#include "kdb_private.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kdb."
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
KDBMSG(BADLENGTH, "Invalid length field"),
KDBMSG(NOBP, "No Breakpoint exists"),
KDBMSG(BADADDR, "Invalid address"),
+KDBMSG(NOPERM, "Permission denied"),
};
#undef KDBMSG
@@ -187,6 +196,26 @@ struct task_struct *kdb_curr_task(int cpu)
return p;
}
+/*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+bool no_args)
+{
+/* permissions comes from userspace so needs massaging slightly */
+permissions &= KDB_ENABLE_MASK;
+permissions |= KDB_ENABLE_ALWAYS_SAFE;
+/* some commands change group when launched with no arguments */
+if (no_args)
+permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+flags |= KDB_ENABLE_ALL;
+return permissions & flags;
+}
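The check works by sanitizing the user-controlled permission mask, always granting the safe group, widening the mask for no-argument invocations via a bit shift, and then intersecting it with the command's flags. A compilable toy version with made-up flag values (the real masks live in kdb.h):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative values only. ENABLE_ALL is part of the permission mask
     * and is OR'd into every command's flags, so a console with everything
     * enabled matches any command. */
    #define ENABLE_ALL           0x01u
    #define ENABLE_MEM_READ      0x02u
    #define ENABLE_FLOW_CTRL     0x04u
    #define ENABLE_ALWAYS_SAFE   0x08u
    #define ENABLE_MASK          0x0fu
    #define ENABLE_NO_ARGS_SHIFT 4

    static bool check_flags(unsigned int flags, unsigned int permissions,
                            bool no_args)
    {
        permissions &= ENABLE_MASK;          /* sanitize user-supplied bits */
        permissions |= ENABLE_ALWAYS_SAFE;   /* safe commands always allowed */
        if (no_args)                         /* no-arg form may be laxer */
            permissions |= permissions << ENABLE_NO_ARGS_SHIFT;
        flags |= ENABLE_ALL;
        return permissions & flags;
    }

    int main(void)
    {
        printf("%d\n", check_flags(ENABLE_MEM_READ, ENABLE_ALL, false)); /* 1 */
        printf("%d\n", check_flags(ENABLE_MEM_READ, 0, false));          /* 0 */
        return 0;
    }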
/*
* kdbgetenv - This function will return the character string value of
* an environment variable.
@@ -475,6 +504,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
char *cp;
kdb_symtab_t symtab;
+/*
+ * If the enable flags prohibit both arbitrary memory access
+ * and flow control then there are no reasonable grounds to
+ * provide symbol lookup.
+ */
+if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+kdb_cmd_enabled, false))
+return KDB_NOPERM;
/*
* Process arguments which follow the following syntax:
*
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
if (!s->count)
s->usable = 0;
if (s->usable)
-kdb_register(s->name, kdb_exec_defcmd,
-s->usage, s->help, 0);
+/* macros are always safe because when executed each
+ * internal command re-enters kdb_parse() and is
+ * safety checked individually.
+ */
+kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+s->help, 0,
+KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
if (i < kdb_max_commands) {
int result;
+if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+return KDB_NOPERM;
KDB_STATE_SET(CMD);
result = (*tp->cmd_func)(argc-1, (const char **)argv);
if (result && ignore_errors && result > KDB_CMD_GO)
result = 0;
KDB_STATE_CLEAR(CMD);
-switch (tp->cmd_repeat) {
-case KDB_REPEAT_NONE:
-argc = 0;
-if (argv[0])
-*(argv[0]) = '\0';
-break;
-case KDB_REPEAT_NO_ARGS:
-argc = 1;
-if (argv[1])
-*(argv[1]) = '\0';
-break;
-case KDB_REPEAT_WITH_ARGS:
-break;
-}
+if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+return result;
+argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+if (argv[argc])
+*(argv[argc]) = '\0';
return result;
}
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
*/
static int kdb_sr(int argc, const char **argv)
{
+bool check_mask =
+!kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
if (argc != 1)
return KDB_ARGCOUNT;
kdb_trap_printk++;
-__handle_sysrq(*argv[1], false);
+__handle_sysrq(*argv[1], check_mask);
kdb_trap_printk--;
return 0;
@@ -1979,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4ld ", module_refcount(mod));
kdb_printf("%4d ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) {
state = 'F'; /* cpu is offline */
+} else if (!kgdb_info[i].enter_kgdb) {
+state = 'D'; /* cpu is online but unresponsive */
} else {
state = ' '; /* cpu is responding to kdb */
if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
/*
* Validate cpunum
*/
-if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
return 0;
if (!kt->cmd_name)
continue;
+if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+continue;
if (strlen(kt->cmd_usage) > 20)
space = "\n ";
kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
}
/*
-* kdb_register_repeat - This function is used to register a kernel
+* kdb_register_flags - This function is used to register a kernel
* debugger command.
* Inputs:
* cmd Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
* zero for success, one if a duplicate command.
*/
#define kdb_command_extend 50 /* arbitrary */
-int kdb_register_repeat(char *cmd,
-kdb_func_t func,
-char *usage,
-char *help,
-short minlen,
-kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+kdb_func_t func,
+char *usage,
+char *help,
+short minlen,
+kdb_cmdflags_t flags)
{
int i;
kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
kp->cmd_func = func;
kp->cmd_usage = usage;
kp->cmd_help = help;
-kp->cmd_flags = 0;
kp->cmd_minlen = minlen;
-kp->cmd_repeat = repeat;
+kp->cmd_flags = flags;
return 0;
}
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
/*
* kdb_register - Compatibility register function for commands that do
* not need to specify a repeat state. Equivalent to
-* kdb_register_repeat with KDB_REPEAT_NONE.
+* kdb_register_flags with flags set to 0.
* Inputs:
* cmd Command name
* func Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
char *help,
short minlen)
{
-return kdb_register_repeat(cmd, func, usage, help, minlen,
-KDB_REPEAT_NONE);
+return kdb_register_flags(cmd, func, usage, help, minlen, 0);
}
EXPORT_SYMBOL_GPL(kdb_register);
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
for_each_kdbcmd(kp, i)
kp->cmd_name = NULL;
kdb_register_repeat("md", kdb_md, "<vaddr>",
kdb_register_flags("md", kdb_md, "<vaddr>",
"Display Memory Contents, also mdWcN, e.g. md8c1", 1,
-KDB_REPEAT_NO_ARGS);
-kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
-"Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
-kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
-"Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
-kdb_register_repeat("mds", kdb_md, "<vaddr>",
-"Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
-kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
-"Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
-kdb_register_repeat("go", kdb_go, "[<vaddr>]",
-"Continue Execution", 1, KDB_REPEAT_NONE);
-kdb_register_repeat("rd", kdb_rd, "",
-"Display Registers", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
-"Modify Registers", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("ef", kdb_ef, "<vaddr>",
-"Display exception frame", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
-"Stack traceback", 1, KDB_REPEAT_NONE);
-kdb_register_repeat("btp", kdb_bt, "<pid>",
-"Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
-"Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("btc", kdb_bt, "",
-"Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+"Display Raw Memory", 0,
+KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+"Display Physical Memory", 0,
+KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+kdb_register_flags("mds", kdb_md, "<vaddr>",
+"Display Memory Symbolically", 0,
+KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+"Modify Memory Contents", 0,
+KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+kdb_register_flags("go", kdb_go, "[<vaddr>]",
+"Continue Execution", 1,
+KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+kdb_register_flags("rd", kdb_rd, "",
+"Display Registers", 0,
+KDB_ENABLE_REG_READ);
+kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+"Modify Registers", 0,
+KDB_ENABLE_REG_WRITE);
+kdb_register_flags("ef", kdb_ef, "<vaddr>",
+"Display exception frame", 0,
+KDB_ENABLE_MEM_READ);
+kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+"Stack traceback", 1,
+KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+kdb_register_flags("btp", kdb_bt, "<pid>",
+"Display stack for process <pid>", 0,
+KDB_ENABLE_INSPECT);
+kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+"Backtrace all processes matching state flag", 0,
+KDB_ENABLE_INSPECT);
+kdb_register_flags("btc", kdb_bt, "",
+"Backtrace current process on each cpu", 0,
+KDB_ENABLE_INSPECT);
+kdb_register_flags("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
-KDB_REPEAT_NONE);
-kdb_register_repeat("env", kdb_env, "",
-"Show environment variables", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("set", kdb_set, "",
-"Set environment variables", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("help", kdb_help, "",
-"Display Help Message", 1, KDB_REPEAT_NONE);
-kdb_register_repeat("?", kdb_help, "",
-"Display Help Message", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
-"Switch to new cpu", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("kgdb", kdb_kgdb, "",
-"Enter kgdb mode", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
-"Display active task list", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("pid", kdb_pid, "<pidnum>",
-"Switch to another task", 0, KDB_REPEAT_NONE);
-kdb_register_repeat("reboot", kdb_reboot, "",
-"Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+kdb_register_flags("env", kdb_env, "",
+"Show environment variables", 0,
+KDB_ENABLE_ALWAYS_SAFE);
+kdb_register_flags("set", kdb_set, "",
+"Set environment variables", 0,
+KDB_ENABLE_ALWAYS_SAFE);
+kdb_register_flags("help", kdb_help, "",
+"Display Help Message", 1,
+KDB_ENABLE_ALWAYS_SAFE);
+kdb_register_flags("?", kdb_help, "",
+"Display Help Message", 0,
+KDB_ENABLE_ALWAYS_SAFE);
+kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+"Switch to new cpu", 0,
+KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+kdb_register_flags("kgdb", kdb_kgdb, "",
+"Enter kgdb mode", 0, 0);
+kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+"Display active task list", 0,
+KDB_ENABLE_INSPECT);
+kdb_register_flags("pid", kdb_pid, "<pidnum>",
+"Switch to another task", 0,
+KDB_ENABLE_INSPECT);
+kdb_register_flags("reboot", kdb_reboot, "",
+"Reboot the machine immediately", 0,
+KDB_ENABLE_REBOOT);
#if defined(CONFIG_MODULES)
kdb_register_repeat("lsmod", kdb_lsmod, "",
"List loaded kernel modules", 0, KDB_REPEAT_NONE);
kdb_register_flags("lsmod", kdb_lsmod, "",
"List loaded kernel modules", 0,
KDB_ENABLE_INSPECT);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
kdb_register_repeat("sr", kdb_sr, "<key>",
"Magic SysRq key", 0, KDB_REPEAT_NONE);
kdb_register_flags("sr", kdb_sr, "<key>",
"Magic SysRq key", 0,
KDB_ENABLE_ALWAYS_SAFE);
#endif
#if defined(CONFIG_PRINTK)
kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
"Display syslog buffer", 0, KDB_REPEAT_NONE);
kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
"Display syslog buffer", 0,
KDB_ENABLE_ALWAYS_SAFE);
#endif
if (arch_kgdb_ops.enable_nmi) {
kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
"Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
"Disable NMI entry to KDB", 0,
KDB_ENABLE_ALWAYS_SAFE);
}
kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
"Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
"Send a signal to a process", 0, KDB_REPEAT_NONE);
kdb_register_repeat("summary", kdb_summary, "",
"Summarize the system", 4, KDB_REPEAT_NONE);
kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
"Display per_cpu variables", 3, KDB_REPEAT_NONE);
kdb_register_repeat("grephelp", kdb_grep_help, "",
"Display help on | grep", 0, KDB_REPEAT_NONE);
kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
"Define a set of commands, down to endefcmd", 0,
KDB_ENABLE_ALWAYS_SAFE);
kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
"Send a signal to a process", 0,
KDB_ENABLE_SIGNAL);
kdb_register_flags("summary", kdb_summary, "",
"Summarize the system", 4,
KDB_ENABLE_ALWAYS_SAFE);
kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
"Display per_cpu variables", 3,
KDB_ENABLE_MEM_READ);
kdb_register_flags("grephelp", kdb_grep_help, "",
"Display help on | grep", 0,
KDB_ENABLE_ALWAYS_SAFE);
}
/* Execute any commands defined in kdb_cmds. */

View file

@@ -172,10 +172,9 @@ typedef struct _kdbtab {
kdb_func_t cmd_func; /* Function to execute command */
char *cmd_usage; /* Usage String for this command */
char *cmd_help; /* Help message for this command */
-short cmd_flags; /* Parsing flags */
short cmd_minlen; /* Minimum legal # command
* chars required */
-kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
+kdb_cmdflags_t cmd_flags; /* Command behaviour flags */
} kdbtab_t;
extern int kdb_bt(int, const char **); /* KDB display back trace */

View file

@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
}
static void perf_sample_regs_user(struct perf_regs *regs_user,
-struct pt_regs *regs)
+struct pt_regs *regs,
+struct pt_regs *regs_user_copy)
{
-if (!user_mode(regs)) {
-if (current->mm)
-regs = task_pt_regs(current);
-else
-regs = NULL;
-}
-if (regs) {
-regs_user->abi = perf_reg_abi(current);
+if (user_mode(regs)) {
+regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
+} else if (current->mm) {
+perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
}
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-perf_sample_regs_user(&data->regs_user, regs);
+perf_sample_regs_user(&data->regs_user, regs,
+&data->regs_user_copy);
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */

View file

@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
+/*
+ * We can race with wait_task_zombie() from another thread.
+ * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+ * can't confuse the checks below.
+ */
+int exit_state = ACCESS_ONCE(p->exit_state);
int ret;
-if (unlikely(p->exit_state == EXIT_DEAD))
+if (unlikely(exit_state == EXIT_DEAD))
return 0;
ret = eligible_child(wo, p);
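Snapshotting once and testing the snapshot is the whole fix: every later check sees the same value even if another thread moves the task to EXIT_DEAD mid-way through. A small C11 sketch of the pattern, using atomic_load where the kernel uses ACCESS_ONCE():

    #include <stdatomic.h>
    #include <stdio.h>

    enum { EXIT_ZOMBIE = 1, EXIT_DEAD = 2 };

    static _Atomic int task_exit_state = EXIT_ZOMBIE;

    static int consider(void)
    {
        int exit_state = atomic_load(&task_exit_state); /* one snapshot */

        if (exit_state == EXIT_DEAD)    /* all checks use the snapshot, */
            return 0;                   /* never a fresh, racy re-read  */
        if (exit_state == EXIT_ZOMBIE)
            return 1;
        return -1;
    }

    int main(void)
    {
        printf("%d\n", consider());
        return 0;
    }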
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
return 0;
}
-if (unlikely(p->exit_state == EXIT_TRACE)) {
+if (unlikely(exit_state == EXIT_TRACE)) {
/*
* ptrace == 0 means we are the natural parent. In this case
* we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* slay zombie? */
-if (p->exit_state == EXIT_ZOMBIE) {
+if (exit_state == EXIT_ZOMBIE) {
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p)) {
/*

View file

@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
static void free_insn_page(void *page)
{
-module_free(NULL, page);
+module_memfree(page);
}
struct kprobe_insn_cache kprobe_insn_slots = {

View file

@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
DEBUG_LOCKS_WARN_ON(lock->owner != current);
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-mutex_clear_owner(lock);
}
/*
* __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
* mutexes so that we can do it here after we've verified state.
*/
+mutex_clear_owner(lock);
atomic_set(&lock->count, 1);
}

View file

@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
return 0;
}
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod: the module we're checking
+ *
+ * Returns:
+ *	-1 if the module is in the process of unloading
+ *	otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
{
-return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
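The refcount is stored biased by MODULE_REF_BASE (1), so a live, unused module reads 0 and a module whose base reference has been dropped for unload reads -1 — which is why the return type changes to a signed int. A toy model of the bias:

    #include <stdatomic.h>
    #include <stdio.h>

    #define REF_BASE 1

    static _Atomic int refcnt = REF_BASE;   /* loaded, no users */

    static int refcount(void)
    {
        return atomic_load(&refcnt) - REF_BASE;   /* -1 while unloading */
    }

    int main(void)
    {
        printf("%d\n", refcount());       /* 0: loaded, unused */
        atomic_fetch_sub(&refcnt, 1);     /* unload drops the base reference */
        printf("%d\n", refcount());       /* -1: unloading */
        return 0;
    }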
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
struct module_use *use;
int printed_something = 0;
seq_printf(m, " %lu ", module_refcount(mod));
seq_printf(m, " %i ", module_refcount(mod));
/*
* Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
-return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}
static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
{
vfree(module_region);
}
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
{
}
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
-module_free(mod, mod->module_init);
+module_arch_freeing_init(mod);
+module_memfree(mod->module_init);
kfree(mod->args);
percpu_modfree(mod);
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
/* Finally, free the core (containing the module structure) */
unset_module_core_ro_nx(mod);
-module_free(mod, mod->module_core);
+module_memfree(mod->module_core);
#ifdef CONFIG_MPU
update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
*/
kmemleak_ignore(ptr);
if (!ptr) {
-module_free(mod, mod->module_core);
+module_memfree(mod->module_core);
return -ENOMEM;
}
memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
-module_free(mod, mod->module_init);
-module_free(mod, mod->module_core);
+module_arch_freeing_init(mod);
+module_memfree(mod->module_init);
+module_memfree(mod->module_core);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
#endif
}
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+struct rcu_head rcu;
+void *module_init;
+};
+static void do_free_init(struct rcu_head *head)
+{
+struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+module_memfree(m->module_init);
+kfree(m);
+}
/* This is where the real work happens */
static int do_init_module(struct module *mod)
{
int ret = 0;
+struct mod_initfree *freeinit;
+freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+if (!freeinit) {
+ret = -ENOMEM;
+goto fail;
+}
+freeinit->module_init = mod->module_init;
/*
* We want to find out whether @mod uses async during init. Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
if (mod->init != NULL)
ret = do_one_initcall(mod->init);
if (ret < 0) {
-/*
- * Init routine failed: abort. Try to protect us from
- * buggy refcounters.
- */
-mod->state = MODULE_STATE_GOING;
-synchronize_sched();
-module_put(mod);
-blocking_notifier_call_chain(&module_notify_list,
-MODULE_STATE_GOING, mod);
-free_module(mod);
-wake_up_all(&module_wq);
-return ret;
+goto fail_free_freeinit;
}
if (ret > 0) {
pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
-module_free(mod, mod->module_init);
+module_arch_freeing_init(mod);
mod->module_init = NULL;
mod->init_size = 0;
mod->init_ro_size = 0;
mod->init_text_size = 0;
+/*
+ * We want to free module_init, but be aware that kallsyms may be
+ * walking this with preempt disabled. In all the failure paths,
+ * we call synchronize_rcu/synchronize_sched, but we don't want
+ * to slow down the success path, so use actual RCU here.
+ */
+call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
return 0;
+fail_free_freeinit:
+kfree(freeinit);
+fail:
+/* Try to protect us from buggy refcounters. */
+mod->state = MODULE_STATE_GOING;
+synchronize_sched();
+module_put(mod);
+blocking_notifier_call_chain(&module_notify_list,
+MODULE_STATE_GOING, mod);
+free_module(mod);
+wake_up_all(&module_wq);
+return ret;
}
static int may_init_module(void)
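The success path now frees the init section through an RCU callback, because kallsyms may be walking the module with only preemption disabled; the failure paths keep the synchronous teardown. A userspace sketch of the deferred-free shape, with the grace period collapsed to an immediate call purely for illustration:

    #include <stdlib.h>

    /* Package the pointer with a callback header and hand it to the
     * reclaimer, as the diff does with struct mod_initfree + call_rcu(). */
    struct deferred_free {
        void (*cb)(struct deferred_free *);
        void *payload;
    };

    static void do_free(struct deferred_free *d)
    {
        free(d->payload);
        free(d);
    }

    static void call_after_grace_period(struct deferred_free *d)
    {
        /* kernel: call_rcu(&freeinit->rcu, do_free_init); here: run now */
        d->cb(d);
    }

    int main(void)
    {
        struct deferred_free *d = malloc(sizeof(*d));
        if (!d)
            return 1;
        d->cb = do_free;
        d->payload = malloc(128);   /* stands in for mod->module_init */
        call_after_grace_period(d);
        return 0;
    }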

View file

@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
mk->mp->grp.attrs = new_attrs;
/* Tack new one on the end. */
memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
mk->mp->attrs[mk->mp->num].param = kp;
mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
+/* Do not allow runtime DAC changes to make param writable. */
+if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+else
+mk->mp->attrs[mk->mp->num].mattr.store = NULL;
mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
mk->mp->num++;
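The guard simply refuses to install a store hook unless the declared permissions contain a write bit, so a later chmod on the sysfs node cannot make a read-only parameter writable. The mode test itself, extracted into a compilable form:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    /* Same test as the diff: any of the owner/group/other write bits. */
    static bool param_is_writable(mode_t perm)
    {
        return (perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0;
    }

    int main(void)
    {
        printf("%d\n", param_is_writable(0644));   /* 1 */
        printf("%d\n", param_is_writable(0444));   /* 0 */
        return 0;
    }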

View file

@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
const struct range *r2 = x2;
-s64 start1, start2;
-start1 = r1->start;
-start2 = r2->start;
-return start1 - start2;
+if (r1->start < r2->start)
+return -1;
+if (r1->start > r2->start)
+return 1;
+return 0;
}
int clean_sort_range(struct range *range, int az)
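The old comparator returned a 64-bit difference through an int, which truncates and can misorder ranges whose starts differ by a multiple of 2^32; the explicit three-way compare cannot. A small demonstration (the signed narrowing is implementation-defined, but on common ABIs the difference below collapses to 0):

    #include <stdint.h>
    #include <stdio.h>

    static int cmp_sub(uint64_t a, uint64_t b)
    {
        return a - b;                 /* truncated to 32 bits: unreliable */
    }

    static int cmp_safe(uint64_t a, uint64_t b)
    {
        if (a < b)
            return -1;
        if (a > b)
            return 1;
        return 0;
    }

    int main(void)
    {
        /* starts differ by exactly 2^32: cmp_sub calls them equal */
        printf("sub: %d  safe: %d\n",
               cmp_sub(1ULL << 32, 0), cmp_safe(1ULL << 32, 0));
        return 0;
    }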

View file

@@ -4650,6 +4650,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
struct dl_bw *cur_dl_b;
unsigned long flags;
+if (!cpumask_weight(cur))
+return ret;
rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur));
trial_cpus = cpumask_weight(trial);

View file

@@ -1729,7 +1729,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
nodes = node_online_map;
for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
unsigned long max_faults = 0;
-nodemask_t max_group;
+nodemask_t max_group = NODE_MASK_NONE;
int a, b;
/* Are there nodes at this distance from each other? */

View file

@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
up_write(&me->mm->mmap_sem);
break;
case PR_MPX_ENABLE_MANAGEMENT:
+if (arg2 || arg3 || arg4 || arg5)
+return -EINVAL;
error = MPX_ENABLE_MANAGEMENT(me);
break;
case PR_MPX_DISABLE_MANAGEMENT:
+if (arg2 || arg3 || arg4 || arg5)
+return -EINVAL;
error = MPX_DISABLE_MANAGEMENT(me);
break;
default:

View file

@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
return -EPERM;
+if (txc->modes & ADJ_FREQUENCY) {
+if (LONG_MIN / PPM_SCALE > txc->freq)
+return -EINVAL;
+if (LONG_MAX / PPM_SCALE < txc->freq)
+return -EINVAL;
+}
return 0;
}
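Dividing the limit rather than multiplying the input is the standard overflow-safe range check: txc->freq * PPM_SCALE stays representable exactly when freq lies between LONG_MIN/PPM_SCALE and LONG_MAX/PPM_SCALE, and the divisions themselves cannot overflow. The same guard in a compilable sketch, with an illustrative stand-in for PPM_SCALE:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PPM_SCALE_DEMO 65536   /* illustrative stand-in only */

    static bool freq_in_range(long freq)
    {
        if (LONG_MIN / PPM_SCALE_DEMO > freq)
            return false;
        if (LONG_MAX / PPM_SCALE_DEMO < freq)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", freq_in_range(327680));     /* modest value: 1 */
        printf("%d\n", freq_in_range(LONG_MAX));   /* would overflow: 0 */
        return 0;
    }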

View file

@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
if (tv) {
if (copy_from_user(&user_tv, tv, sizeof(*tv)))
return -EFAULT;
+if (!timeval_valid(&user_tv))
+return -EINVAL;
new_ts.tv_sec = user_tv.tv_sec;
new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
}
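The added call rejects a timeval that would corrupt the conversion below, mirroring the kernel's timeval_valid(): no pre-1970 seconds, and tv_usec strictly within one second. A compilable sketch of the check:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/time.h>

    static bool tv_valid(const struct timeval *tv)
    {
        if (tv->tv_sec < 0)                              /* pre-1970 is bogus */
            return false;
        if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)   /* < one full second */
            return false;
        return true;
    }

    int main(void)
    {
        struct timeval good = { .tv_sec = 1, .tv_usec = 999999 };
        struct timeval bad  = { .tv_sec = 1, .tv_usec = 1000000 };
        printf("%d %d\n", tv_valid(&good), tv_valid(&bad));   /* 1 0 */
        return 0;
    }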

View file

@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
-struct ftrace_hash *old_hash)
+struct ftrace_ops_hash *old_hash)
{
ops->flags |= FTRACE_OPS_FL_MODIFYING;
-ops->old_hash.filter_hash = old_hash;
+ops->old_hash.filter_hash = old_hash->filter_hash;
+ops->old_hash.notrace_hash = old_hash->notrace_hash;
ftrace_run_update_code(command);
ops->old_hash.filter_hash = NULL;
+ops->old_hash.notrace_hash = NULL;
ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
static int ftrace_probe_registered;
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
{
int ret;
int i;
@@ -3637,6 +3639,7 @@ int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
+struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_probe *entry;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+old_hash_ops.filter_hash = old_hash;
+/* Probes only have filters */
+old_hash_ops.notrace_hash = NULL;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
if (!hash) {
count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
-__enable_ftrace_function_probe(old_hash);
+__enable_ftrace_function_probe(&old_hash_ops);
if (!ret)
free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
}
static void ftrace_ops_update_code(struct ftrace_ops *ops,
-struct ftrace_hash *old_hash)
+struct ftrace_ops_hash *old_hash)
{
-if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+struct ftrace_ops *op;
+if (!ftrace_enabled)
+return;
+if (ops->flags & FTRACE_OPS_FL_ENABLED) {
ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+return;
+}
+/*
+ * If this is the shared global_ops filter, then we need to
+ * check if there is another ops that shares it and is enabled.
+ * If so, we still need to run the modify code.
+ */
+if (ops->func_hash != &global_ops.local_hash)
+return;
+do_for_each_ftrace_op(op, ftrace_ops_list) {
+if (op->func_hash == &global_ops.local_hash &&
+op->flags & FTRACE_OPS_FL_ENABLED) {
+ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+/* Only need to do this once */
+return;
+}
+} while_for_each_ftrace_op(op);
}
static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
+struct ftrace_ops_hash old_hash_ops;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
if (!ret) {
-ftrace_ops_update_code(ops, old_hash);
+ftrace_ops_update_code(ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
int ftrace_regex_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
+struct ftrace_ops_hash old_hash_ops;
struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
if (!ret) {
-ftrace_ops_update_code(iter->ops, old_hash);
+ftrace_ops_update_code(iter->ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);

View file

@@ -6918,7 +6918,6 @@ void __init trace_init(void)
tracepoint_printk = 0;
}
tracer_alloc_buffers();
-init_ftrace_syscalls();
trace_event_init();
}

View file

@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
return 0;
}
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+char *buf = bootup_event_buf;
+char *token;
+int ret;
+while (true) {
+token = strsep(&buf, ",");
+if (!token)
+break;
+if (!*token)
+continue;
+/* Restarting syscalls requires that we stop them first */
+if (disable_first)
+ftrace_set_clr_event(tr, token, 0);
+ret = ftrace_set_clr_event(tr, token, 1);
+if (ret)
+pr_warn("Failed to enable trace event: %s\n", token);
+/* Put back the comma to allow this to be called again */
+if (buf)
+*(buf - 1) = ',';
+}
+}
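strsep() consumes its input by writing NUL over each delimiter, so without the comma restore the boot-time event list could only be parsed once; putting the comma back lets event_trace_enable_again() (added below) walk the same buffer a second time. A userspace demonstration, using the GNU/BSD strsep():

    #include <stdio.h>
    #include <string.h>

    static void walk(char *buf)
    {
        char *token;
        while ((token = strsep(&buf, ",")) != NULL) {
            if (*token)
                printf("token: %s\n", token);
            if (buf)
                *(buf - 1) = ',';   /* undo strsep's NUL so the list survives */
        }
    }

    int main(void)
    {
        char events[] = "sched:sched_switch,syscalls";
        walk(events);    /* first pass */
        walk(events);    /* buffer intact: second pass sees the same tokens */
        return 0;
    }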
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
struct ftrace_event_call **iter, *call;
-char *buf = bootup_event_buf;
-char *token;
int ret;
if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
*/
__trace_early_add_events(tr);
-while (true) {
-token = strsep(&buf, ",");
-if (!token)
-break;
-if (!*token)
-continue;
-ret = ftrace_set_clr_event(tr, token, 1);
-if (ret)
-pr_warn("Failed to enable trace event: %s\n", token);
-}
+early_enable_events(tr, false);
trace_printk_start_comm();
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
return 0;
}
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, making
+ * the syscall tracepoint never get reached, but the event is enabled
+ * regardless (and not doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+struct trace_array *tr;
+tr = top_trace_array();
+if (!tr)
+return -ENODEV;
+early_enable_events(tr, true);
+return 0;
+}
+early_initcall(event_trace_enable_again);
static __init int event_trace_init(void)
{
struct trace_array *tr;

View file

@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
"Dump ftrace log", 0, KDB_REPEAT_NONE);
kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
"Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
return 0;
}

View file

@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
-*
-* Return:
-* %false if no action was taken and pool->lock stayed locked, %true
-* otherwise.
*/
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
-if (!need_to_create_worker(pool))
-return false;
restart:
spin_unlock_irq(&pool->lock);
@@ -1877,7 +1871,6 @@ restart:
*/
if (need_to_create_worker(pool))
goto restart;
-return true;
}
/**
@@ -1897,16 +1890,14 @@ restart:
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
-* %false if the pool don't need management and the caller can safely start
-* processing works, %true indicates that the function released pool->lock
-* and reacquired it to perform some management function and that the
-* conditions that the caller verified while holding the lock before
-* calling the function might no longer be true.
+* %false if the pool doesn't need management and the caller can safely
+* start processing works, %true if management function was performed and
+* the conditions that the caller verified before calling the function may
+* no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
-bool ret = false;
/*
* Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
* actual management, the pool may stall indefinitely.
*/
if (!mutex_trylock(&pool->manager_arb))
-return ret;
+return false;
-ret |= maybe_create_worker(pool);
+maybe_create_worker(pool);
mutex_unlock(&pool->manager_arb);
-return ret;
+return true;
}
/**