Merge branch 'master' into for-2.6.31
Conflicts:
	drivers/block/hd.c
	drivers/block/mg_disk.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 		if (audit_enabled) {
 			struct audit_buffer *ab;
-			ab = audit_log_start(NULL, GFP_KERNEL,
+			ab = audit_log_start(NULL, GFP_NOFS,
 					AUDIT_CONFIG_CHANGE);
 			audit_log_format(ab, "auid=%u ses=%u",
 				audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 		e = container_of(r, struct audit_entry, rule);
 		if (audit_enabled) {
 			struct audit_buffer *ab;
-			ab = audit_log_start(NULL, GFP_KERNEL,
+			ab = audit_log_start(NULL, GFP_NOFS,
 				AUDIT_CONFIG_CHANGE);
 			audit_log_format(ab, "auid=%u ses=%u",
 				audit_get_loginuid(current),
@@ -1133,8 +1133,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 free_cg_links:
 	free_cg_links(&tmp_cg_links);
 drop_new_super:
-	up_write(&sb->s_umount);
-	deactivate_super(sb);
+	deactivate_locked_super(sb);
 	return ret;
 }
@@ -363,8 +363,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
@@ -1583,8 +1583,8 @@ static void sysrq_handle_gdb(int key, struct tty_struct *tty)
 
 static struct sysrq_key_op sysrq_gdb_op = {
 	.handler	= sysrq_handle_gdb,
-	.help_msg	= "Gdb",
-	.action_msg	= "GDB",
+	.help_msg	= "debug(G)",
+	.action_msg	= "DEBUG",
 };
 #endif
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 	return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_arm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_disarm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arch_arm_kprobe(ap);
+			arm_kprobe(ap);
 	}
 	return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-			mutex_lock(&text_mutex);
-			arch_disarm_kprobe(p);
-			mutex_unlock(&text_mutex);
-		}
+		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+			disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
 			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				arch_disarm_kprobe(old_p);
+				disarm_kprobe(old_p);
 		}
 	}
 	return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
 		try_to_disable_aggr_kprobe(p);
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_disarm_kprobe(p);
+		disarm_kprobe(p);
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 	}
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		arm_kprobe(p);
 
 	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
@@ -54,9 +54,9 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
-#define MAX_LOCKDEP_ENTRIES	8192UL
+#define MAX_LOCKDEP_ENTRIES	16384UL
 
-#define MAX_LOCKDEP_CHAINS_BITS	14
+#define MAX_LOCKDEP_CHAINS_BITS	15
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
 	 * post-warning case.
 	 */
 	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
 	set_bit(flag, &tainted_mask);
 }
@@ -340,34 +340,46 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
-{
+struct slowpath_args {
+	const char *fmt;
 	va_list args;
-	char function[KSYM_SYMBOL_LEN];
-	unsigned long caller = (unsigned long)__builtin_return_address(0);
+};
+
+static void warn_slowpath_common(const char *file, int line, void *caller, struct slowpath_args *args)
+{
 	const char *board;
 
-	sprint_symbol(function, caller);
-
 	printk(KERN_WARNING "------------[ cut here ]------------\n");
-	printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
-		line, function);
+	printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
 	board = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (board)
 		printk(KERN_WARNING "Hardware name: %s\n", board);
 
-	if (fmt) {
-		va_start(args, fmt);
-		vprintk(fmt, args);
-		va_end(args);
-	}
+	if (args)
+		vprintk(args->fmt, args->args);
 
 	print_modules();
 	dump_stack();
 	print_oops_end_marker();
 	add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+	struct slowpath_args args;
+
+	args.fmt = fmt;
+	va_start(args.args, fmt);
+	warn_slowpath_common(file, line, __builtin_return_address(0), &args);
+	va_end(args.args);
+}
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+	warn_slowpath_common(file, line, __builtin_return_address(0), NULL);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 *	timer call will interfere.
 	 */
 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-		int firing;
+		int cpu_firing;
+
 		spin_lock(&timer->it_lock);
 		list_del_init(&timer->it.cpu.entry);
-		firing = timer->it.cpu.firing;
+		cpu_firing = timer->it.cpu.firing;
 		timer->it.cpu.firing = 0;
 		/*
 		 * The firing flag is -1 if we collided with a reset
 		 * of the timer, which already reported this
 		 * almost-firing as an overrun.  So don't generate an event.
 		 */
-		if (likely(firing >= 0)) {
+		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
-		}
 		spin_unlock(&timer->it_lock);
 	}
 }
@@ -241,9 +241,9 @@ static int create_image(int platform_mode)
 
 	local_irq_disable();
 
-	sysdev_suspend(PMSG_FREEZE);
+	error = sysdev_suspend(PMSG_FREEZE);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down, "
+		printk(KERN_ERR "PM: Some system devices failed to power down, "
 			"aborting hibernation\n");
 		goto Enable_irqs;
 	}
@@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
 
@@ -232,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
@@ -4732,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (user_tick)
 		account_user_time(p, one_jiffy, one_jiffy_scaled);
-	else if (p != rq->idle)
+	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
 		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
 				    one_jiffy_scaled);
 	else
@@ -38,7 +38,8 @@
  */
 unsigned long long __attribute__((weak)) sched_clock(void)
 {
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
+					* (NSEC_PER_SEC / HZ);
 }
 
 static __read_mostly int sched_clock_running;
@@ -101,7 +101,9 @@ static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
+static int one_thousand = 1000;
+
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
@@ -1006,7 +1008,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one_ul,
+		.extra1		= &dirty_bytes_min,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
@@ -1030,28 +1032,6 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0444 /* read-only*/,
 		.proc_handler	= &proc_dointvec,
 	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "nr_pdflush_threads_min",
-		.data		= &nr_pdflush_threads_min,
-		.maxlen		= sizeof nr_pdflush_threads_min,
-		.mode		= 0644 /* read-write */,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
-		.extra2		= &nr_pdflush_threads_max,
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "nr_pdflush_threads_max",
-		.data		= &nr_pdflush_threads_max,
-		.maxlen		= sizeof nr_pdflush_threads_max,
-		.mode		= 0644 /* read-write */,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &nr_pdflush_threads_min,
-		.extra2		= &one_thousand,
-	},
 	{
 		.ctl_name	= VM_SWAPPINESS,
 		.procname	= "swappiness",
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	for (;;) {
 		if (!clockevents_program_event(dev, next, ktime_get()))
 			return;
-		tick_periodic(cpu);
+		/*
+		 * Have to be careful here. If we're in oneshot mode,
+		 * before we call tick_periodic() in a loop, we need
+		 * to be sure we're using a real hardware clocksource.
+		 * Otherwise we could get trapped in an infinite
+		 * loop, as the tick_periodic() increments jiffies,
+		 * when then will increment time, posibly causing
+		 * the loop to trigger again and again.
+		 */
+		if (timekeeping_valid_for_hres())
+			tick_periodic(cpu);
 		next = ktime_add(next, tick_period);
 	}
 }
@@ -2380,7 +2380,7 @@ static const char readme_msg[] =
 	"# echo print-parent > /debug/tracing/trace_options\n"
 	"# echo 1 > /debug/tracing/tracing_enabled\n"
 	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
-	"echo 0 > /debug/tracing/tracing_enabled\n"
+	"# echo 0 > /debug/tracing/tracing_enabled\n"
 ;
 
 static ssize_t
@@ -3448,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		if (!ref)
 			break;
 
+		ref->ref = 1;
 		ref->buffer = info->tr->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer);
 		if (!ref->page) {