Merge branch 'linus' into x86/fpu, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
 	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
 		i++;
 
-	if (i == 0)
-		return 0;
+	if (!i)
+		return -ENODEV;
 
 	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
 	if (!nb)
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
 		mem += IOAPIC_RESOURCE_NAME_SIZE;
+		ioapics[i].iomem_res = &res[num];
 		num++;
-		ioapics[i].iomem_res = res;
 	}
 
 	ioapic_resources = res;
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 	u64 value;
 
 	/* re-enable TopologyExtensions if switched off by BIOS */
-	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
 		if (msr_set_bit(0xc0011005, 54) > 0) {
 			rdmsrl(0xc0011005, value);
 			if (value & BIT_64(54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 			}
 		}
 	}
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 {
-	struct task_struct *task;
 	unsigned long ret_addr;
 	int index;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
-	task = tinfo->task;
 	index = task->curr_ret_stack;
 
 	if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
 			void *p, unsigned int size, void *end)
 {
-	void *t = tinfo;
+	void *t = task_stack_page(task);
 	if (end) {
 		if (p < end && p >= (end-THREAD_SIZE))
 			return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data,
 		unsigned long *end, int *graph)
 {
 	struct stack_frame *frame = (struct stack_frame *)bp;
 
-	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+	while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
 		unsigned long addr;
 
 		addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
 			} else {
 				ops->address(data, addr, 0);
 			}
-			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+			print_ftrace_graph_addr(addr, data, ops, task, graph);
 		}
 		stack++;
 	}
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 	struct stack_frame *frame = (struct stack_frame *)bp;
 	unsigned long *ret_addr = &frame->return_address;
 
-	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+	while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
 		if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 			break;
 		frame = frame->next_frame;
 		ret_addr = &frame->return_address;
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 	}
 
 	return (unsigned long)frame;
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	bp = stack_frame(task, regs);
 
 	for (;;) {
-		struct thread_info *context;
 		void *end_stack;
 
 		end_stack = is_hardirq_stack(stack, cpu);
 		if (!end_stack)
 			end_stack = is_softirq_stack(stack, cpu);
 
-		context = task_thread_info(task);
-		bp = ops->walk_stack(context, stack, bp, ops, data,
+		bp = ops->walk_stack(task, stack, bp, ops, data,
 				     end_stack, &graph);
 
 		/* Stop if not on irq stack */
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	struct thread_info *tinfo;
 	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned long dummy;
 	unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * current stack address. If the stacks consist of nested
 	 * exceptions
 	 */
-	tinfo = task_thread_info(task);
 	while (!done) {
 		unsigned long *stack_end;
 		enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			if (ops->stack(data, id) < 0)
 				break;
 
-			bp = ops->walk_stack(tinfo, stack, bp, ops,
+			bp = ops->walk_stack(task, stack, bp, ops,
 					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
 			if (ops->stack(data, "IRQ") < 0)
 				break;
-			bp = ops->walk_stack(tinfo, stack, bp,
+			bp = ops->walk_stack(task, stack, bp,
 					     ops, data, stack_end, &graph);
 			/*
 			 * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	/*
 	 * This handles the process stack:
 	 */
-	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curstk;
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	curstk = current_stack();
 	irqstk = __this_cpu_read(softirq_stack);
 
 	/* build the stack frame on the softirq stack */
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * normal page fault.
 		 */
 		regs->ip = (unsigned long)cur->addr;
+		/*
+		 * Trap flag (TF) has been set here because this fault
+		 * happened where the single stepping will be done.
+		 * So clear it by resetting the current kprobe:
+		 */
+		regs->flags &= ~X86_EFLAGS_TF;
+
+		/*
+		 * If the TF flag was set before the kprobe hit,
+		 * don't touch it:
+		 */
 		regs->flags |= kcb->kprobe_old_flags;
+
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
 	u8 flags;
 
 	do {
-		version = __pvclock_read_cycles(src, &ret, &flags);
+		version = src->version;
+		/* Make the latest version visible */
+		smp_rmb();
+
+		flags = src->flags;
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
 	do {
 		version = __pvclock_read_cycles(src, &ret, &flags);
+		/* Make sure that the version double-check is last. */
+		smp_rmb();
 	} while ((src->version & 1) || version != src->version);
 
 	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
 		local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
 		rcu_nmi_enter();
 	}
 
-	/*
-	 * We are atomic because we're on the IST stack; or we're on
-	 * x86_32, in which case we still shouldn't schedule; or we're
-	 * on x86_64 and entered from user mode, in which case we're
-	 * still atomic unless ist_begin_non_atomic is called.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 
 	/* This code is a bit fragile. Test it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 
 	if (!user_mode(regs))
 		rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
 	BUG_ON((unsigned long)(current_top_of_stack() -
 			       current_stack_pointer()) >= THREAD_SIZE);
 
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 }
 
 static nokprobe_inline int