Merge branch 'x86/apic' into irq/numa
Merge reason: both topics modify the APIC code but were able to do it in
parallel so far. An upcoming patch generates a conflict so merge them to
avoid the conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 
                if (audit_enabled) {
                        struct audit_buffer *ab;
-                       ab = audit_log_start(NULL, GFP_KERNEL,
+                       ab = audit_log_start(NULL, GFP_NOFS,
                                AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "auid=%u ses=%u",
                                audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
                e = container_of(r, struct audit_entry, rule);
                if (audit_enabled) {
                        struct audit_buffer *ab;
-                       ab = audit_log_start(NULL, GFP_KERNEL,
+                       ab = audit_log_start(NULL, GFP_NOFS,
                                AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "auid=%u ses=%u",
                                audit_get_loginuid(current),
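Context for the GFP_KERNEL to GFP_NOFS switch in both hunks: these config-change records are emitted from the inotify watch paths, where filesystem locks can already be held, and a GFP_KERNEL allocation may recurse into filesystem reclaim and deadlock. A minimal sketch of the resulting call pattern, assuming the usual audit_log_start()/audit_log_end() pairing and an audit_get_sessionid() call as in the elided context:

        struct audit_buffer *ab;

        /* GFP_NOFS: the allocation may not re-enter the filesystem,
         * so it is safe while fs locks are held */
        ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
        if (ab) {       /* audit_log_start() can return NULL */
                audit_log_format(ab, "auid=%u ses=%u",
                                 audit_get_loginuid(current),
                                 audit_get_sessionid(current));
                audit_log_end(ab);
        }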
@@ -360,8 +360,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;
 
-       WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();
 
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
        return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+       mutex_lock(&text_mutex);
+       arch_arm_kprobe(kp);
+       mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+       mutex_lock(&text_mutex);
+       arch_disarm_kprobe(kp);
+       mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
                ap->flags &= ~KPROBE_FLAG_DISABLED;
                if (!kprobes_all_disarmed)
                        /* Arm the breakpoint again. */
-                       arch_arm_kprobe(ap);
+                       arm_kprobe(ap);
        }
        return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
                 * enabled and not gone - otherwise, the breakpoint would
                 * already have been removed. We save on flushing icache.
                 */
-               if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-                       mutex_lock(&text_mutex);
-                       arch_disarm_kprobe(p);
-                       mutex_unlock(&text_mutex);
-               }
+               if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+                       disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
        } else {
                if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
                if (!kprobe_disabled(old_p)) {
                        try_to_disable_aggr_kprobe(old_p);
                        if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-                               arch_disarm_kprobe(old_p);
+                               disarm_kprobe(old_p);
                }
        }
        return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
                try_to_disable_aggr_kprobe(p);
 
        if (!kprobes_all_disarmed && kprobe_disabled(p))
-               arch_disarm_kprobe(p);
+               disarm_kprobe(p);
 out:
        mutex_unlock(&kprobe_mutex);
        return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
        }
 
        if (!kprobes_all_disarmed && kprobe_disabled(p))
-               arch_arm_kprobe(p);
+               arm_kprobe(p);
 
        p->flags &= ~KPROBE_FLAG_DISABLED;
        if (p != kp)
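The kprobes hunks above route every arm/disarm through the new text_mutex-taking helpers. For orientation, a sketch of the user-facing API those helpers sit behind: a hypothetical module probing do_fork on a kernel of this vintage (everything outside the kprobes API itself is illustrative):

        #include <linux/kernel.h>
        #include <linux/module.h>
        #include <linux/kprobes.h>

        static int handler_pre(struct kprobe *p, struct pt_regs *regs)
        {
                printk(KERN_INFO "kprobe hit at %p\n", p->addr);
                return 0;       /* let the probed instruction run */
        }

        static struct kprobe kp = {
                .symbol_name    = "do_fork",
                .pre_handler    = handler_pre,
        };

        static int __init kp_init(void)
        {
                int ret = register_kprobe(&kp); /* arms the breakpoint */

                if (ret < 0)
                        return ret;
                disable_kprobe(&kp);    /* now goes through disarm_kprobe() */
                enable_kprobe(&kp);     /* and back through arm_kprobe() */
                return 0;
        }

        static void __exit kp_exit(void)
        {
                unregister_kprobe(&kp);
        }

        module_init(kp_init);
        module_exit(kp_exit);
        MODULE_LICENSE("GPL");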
@@ -340,7 +340,7 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
 {
        va_list args;
        char function[KSYM_SYMBOL_LEN];
@@ -356,7 +356,7 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
        if (board)
                printk(KERN_WARNING "Hardware name: %s\n", board);
 
-       if (fmt) {
+       if (*fmt) {
                va_start(args, fmt);
                vprintk(fmt, args);
                va_end(args);
@@ -367,7 +367,14 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
        print_oops_end_marker();
        add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+       static const char *empty = "";
+       warn_slowpath_fmt(file, line, empty);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
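The rename frees the old name for a format-less companion. Roughly how the asm-generic/bug.h macros are expected to map onto the two entry points after this change (a simplified sketch, not the verbatim header):

        #define __WARN()                warn_slowpath_null(__FILE__, __LINE__)
        #define __WARN_printf(arg...)   warn_slowpath_fmt(__FILE__, __LINE__, arg)

        /* WARN_ON(cond) reaches warn_slowpath_null(); WARN(cond, fmt, ...)
         * carries its message through warn_slowpath_fmt(). */
        #define WARN(condition, format...) ({           \
                int __ret_warn_on = !!(condition);      \
                if (unlikely(__ret_warn_on))            \
                        __WARN_printf(format);          \
                unlikely(__ret_warn_on);                \
        })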
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-               int firing;
+               int cpu_firing;
+
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
-               firing = timer->it.cpu.firing;
+               cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun. So don't generate an event.
                 */
-               if (likely(firing >= 0)) {
+               if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
-               }
                spin_unlock(&timer->it_lock);
        }
 }
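The rename here fixes variable shadowing: run_posix_cpu_timers() already declares the list head 'firing' that the loop iterates, and the inner 'int firing' hid it. A standalone illustration of the hazard (plain userspace C; gcc -Wshadow flags the inner declaration, which is exactly what the rename silences):

        #include <stdio.h>

        int main(void)
        {
                int firing = 1;                 /* outer declaration */
                {
                        int firing = 0;         /* shadows the outer 'firing' */
                        printf("inner: %d\n", firing);  /* prints 0 */
                }
                printf("outer: %d\n", firing);          /* still 1 */
                return 0;
        }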
@@ -4732,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
        if (user_tick)
                account_user_time(p, one_jiffy, one_jiffy_scaled);
-       else if (p != rq->idle)
+       else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
                                    one_jiffy_scaled);
        else
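The widened condition charges a tick to system time whenever it interrupts another interrupt, even if the idle task is current. A sketch of the full branch, with the third arm filled in as an assumption from the elided lines (irq_count() is the hardirq/softirq/NMI portion of preempt_count(), so a tick taken from plain process context has irq_count() == HARDIRQ_OFFSET):

        if (user_tick)
                account_user_time(p, one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                /* idle task, but the tick interrupted a hard- or
                 * softirq: that time is system time, not idle time */
                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
                                    one_jiffy_scaled);
        else
                account_idle_time(one_jiffy);   /* genuinely idle */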
@@ -103,6 +103,9 @@ static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
 
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -1006,7 +1009,7 @@ static struct ctl_table vm_table[] = {
                .mode           = 0644,
                .proc_handler   = &dirty_bytes_handler,
                .strategy       = &sysctl_intvec,
-               .extra1         = &one_ul,
+               .extra1         = &dirty_bytes_min,
        },
        {
                .procname       = "dirty_writeback_centisecs",
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
        for (;;) {
                if (!clockevents_program_event(dev, next, ktime_get()))
                        return;
-               tick_periodic(cpu);
+               /*
+                * Have to be careful here. If we're in oneshot mode,
+                * before we call tick_periodic() in a loop, we need
+                * to be sure we're using a real hardware clocksource.
+                * Otherwise we could get trapped in an infinite
+                * loop, as the tick_periodic() increments jiffies,
+                * which then will increment time, possibly causing
+                * the loop to trigger again and again.
+                */
+               if (timekeeping_valid_for_hres())
+                       tick_periodic(cpu);
                next = ktime_add(next, tick_period);
        }
 }
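The failure mode being guarded against: with no valid hardware clocksource, ktime_get() only advances because tick_periodic() itself bumps jiffies, so every reprogramming attempt lands in the past and the loop never exits. The fixed loop again, annotated (comments are editorial, not from the patch):

        for (;;) {
                /* succeeds once 'next' is in the future: normal exit */
                if (!clockevents_program_event(dev, next, ktime_get()))
                        return;
                /* only advance jiffies when real hardware backs
                 * ktime_get(); otherwise this call is the only thing
                 * moving time forward and 'next' can never catch up */
                if (timekeeping_valid_for_hres())
                        tick_periodic(cpu);
                next = ktime_add(next, tick_period);
        }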
@@ -3448,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                if (!ref)
                        break;
 
+               ref->ref = 1;
                ref->buffer = info->tr->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer);
                if (!ref->page) {
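For context, the structure whose reference count the new line initializes: a sketch of struct buffer_ref as declared earlier in kernel/trace/trace.c (field layout assumed from kernels of this era):

        struct buffer_ref {
                struct ring_buffer      *buffer;        /* source ring buffer */
                void                    *page;          /* page handed to splice */
                int                     ref;            /* must start at 1: the
                                                         * splice pipe buffer owns
                                                         * this ref and drops it in
                                                         * its release op */
        };

Without ref->ref = 1, the first release would see an uninitialized count, and the buffer_ref could be freed while the pipe still references its page.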