Merge branch 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 irq updates from Ingo Molnar:
 "Here are the main changes in this tree:

   - Introduce x86-64 IRQ/exception/debug stack guard pages to detect
     stack overflows immediately and deterministically.

   - Clean up over a decade's worth of accumulated cruft.

  The outcome of this should be more clear-cut faults/crashes when any
  of the low level x86 CPU stacks overflows, instead of silent memory
  corruption and sporadic failures much later on"

* 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  x86/irq: Fix outdated comments
  x86/irq/64: Remove stack overflow debug code
  x86/irq/64: Remap the IRQ stack with guard pages
  x86/irq/64: Split the IRQ stack into its own pages
  x86/irq/64: Init hardirq_stack_ptr during CPU hotplug
  x86/irq/32: Handle irq stack allocation failure proper
  x86/irq/32: Invoke irq_ctx_init() from init_IRQ()
  x86/irq/64: Rename irq_stack_ptr to hardirq_stack_ptr
  x86/irq/32: Rename hard/softirq_stack to hard/softirq_stack_ptr
  x86/irq/32: Make irq stack a character array
  x86/irq/32: Define IRQ_STACK_SIZE
  x86/dumpstack/64: Speedup in_exception_stack()
  x86/exceptions: Split debug IST stack
  x86/exceptions: Enable IST guard pages
  x86/exceptions: Disconnect IST index and stack order
  x86/cpu: Remove orig_ist array
  x86/cpu: Prepare TSS.IST setup for guard pages
  x86/dumpstack/64: Use cpu_entry_area instead of orig_ist
  x86/irq/64: Use cpu entry area instead of orig_ist
  x86/traps: Use cpu_entry_area instead of orig_ist
  ...
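The guard-page idea at the heart of this series is not kernel-specific. The following minimal userspace sketch (illustrative only, not code from this series; assumes 4 KiB pages) shows the mechanism: an inaccessible page placed below a stack turns an overflow into an immediate, deterministic SIGSEGV instead of silent corruption of whatever happens to sit below the stack.

#include <stdio.h>
#include <sys/mman.h>

#define STACK_SIZE	(4 * 4096)

int main(void)
{
	/* Reserve one guard page plus the stack in a single mapping. */
	char *base = mmap(NULL, 4096 + STACK_SIZE, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;

	/* Make the lowest page inaccessible: this is the guard page. */
	if (mprotect(base, 4096, PROT_NONE))
		return 1;

	char *stack = base + 4096;	/* usable stack memory starts here */

	stack[0] = 1;			/* in bounds: works */
	printf("in-bounds write ok\n");

	stack[-1] = 1;			/* "overflow": hits the guard page, SIGSEGV */
	return 0;
}

Before this series, the x86-64 IRQ and exception stacks had no such guard mappings, so an overflow scribbled over adjacent per-CPU data and the damage only surfaced much later.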
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -507,19 +507,6 @@ void load_percpu_segment(int cpu)
 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
-#ifdef CONFIG_X86_64
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
-	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
-};
-#endif
-
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
@@ -1511,9 +1498,9 @@ static __init int setup_clearcpuid(char *arg)
 __setup("clearcpuid=", setup_clearcpuid);
 
 #ifdef CONFIG_X86_64
-DEFINE_PER_CPU_FIRST(union irq_stack_union,
-		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
-EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
+DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
+		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
+EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
 
 /*
  * The following percpu variables are hot. Align current_task to
@@ -1523,9 +1510,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(char *, irq_stack_ptr) =
-	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;
-
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
@@ -1562,23 +1547,7 @@ void syscall_init(void)
 	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
 }
 
-/*
- * Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
 DEFINE_PER_CPU(int, debug_stack_usage);
-
-int is_debug_stack(unsigned long addr)
-{
-	return __this_cpu_read(debug_stack_usage) ||
-		(addr <= __this_cpu_read(debug_stack_addr) &&
-		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
-}
-NOKPROBE_SYMBOL(is_debug_stack);
-
 DEFINE_PER_CPU(u32, debug_idt_ctr);
 
 void debug_stack_set_zero(void)
@@ -1690,17 +1659,14 @@ static void setup_getcpu(int cpu)
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
- * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
 
 void cpu_init(void)
 {
-	struct orig_ist *oist;
+	int cpu = raw_smp_processor_id();
 	struct task_struct *me;
 	struct tss_struct *t;
-	unsigned long v;
-	int cpu = raw_smp_processor_id();
 	int i;
 
 	wait_for_master_cpu(cpu);
@@ -1715,7 +1681,6 @@ void cpu_init(void)
 	load_ucode_ap();
 
 	t = &per_cpu(cpu_tss_rw, cpu);
-	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (this_cpu_read(numa_node) == 0 &&
@@ -1753,16 +1718,11 @@ void cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!oist->ist[0]) {
-		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
-
-		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += exception_stack_sizes[v];
-			oist->ist[v] = t->x86_tss.ist[v] =
-					(unsigned long)estacks;
-			if (v == DEBUG_STACK-1)
-				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
-		}
+	if (!t->x86_tss.ist[0]) {
+		t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
+		t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
+		t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
+		t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
 	}
 
 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
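For the IRQ stack itself, the "x86/irq/64: Remap the IRQ stack with guard pages" commit in the shortlog above populates the hardirq_stack_ptr seen in this diff with a vmalloc-space alias of the per-CPU backing store, so the unmapped guard pages surrounding vmap() areas catch an overflow at once. A simplified sketch of that technique follows (names such as irq_stack_backing, demo_map_irq_stack and the 4-page DEMO_IRQ_STACK_SIZE are illustrative, not the patch's own):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>

#define DEMO_IRQ_STACK_SIZE	(4 * PAGE_SIZE)	/* illustrative size */

/* Page-aligned per-CPU backing store, one IRQ stack per CPU. */
static DEFINE_PER_CPU_PAGE_ALIGNED(char, irq_stack_backing[DEMO_IRQ_STACK_SIZE]);

/* Top-of-stack pointer a CPU loads when switching to its IRQ stack. */
static DEFINE_PER_CPU(void *, demo_hardirq_stack_ptr);

static int demo_map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing, cpu);
	struct page *pages[DEMO_IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;

	/* Collect the physical pages backing this CPU's stack. */
	for (i = 0; i < DEMO_IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}

	/*
	 * Alias them in vmalloc space. vmap() areas are separated by
	 * unmapped guard pages, so running off either end of the stack
	 * faults immediately instead of corrupting neighboring data.
	 */
	va = vmap(pages, DEMO_IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;

	/* Stacks grow down, so publish the top of the alias mapping. */
	per_cpu(demo_hardirq_stack_ptr, cpu) = (char *)va + DEMO_IRQ_STACK_SIZE;
	return 0;
}

The design point: the backing memory stays ordinary per-CPU data, but all accesses go through the vmalloc alias, which is the mapping that actually carries the guards.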