x86/irq/32: Handle irq stack allocation failure properly
irq_ctx_init() crashes hard on page allocation failures. While that's ok during early boot, it's just wrong in the CPU hotplug bringup code. Check the page allocation failure and return -ENOMEM and handle it at the call sites. On early boot the only way out is to BUG(), but on CPU hotplug there is no reason to crash, so just abort the operation. Rename the function to something more sensible while at it. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Alison Schofield <alison.schofield@intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Juergen Gross <jgross@suse.com> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Nicolai Stange <nstange@suse.de> Cc: Pu Wen <puwen@hygon.cn> Cc: Sean Christopherson <sean.j.christopherson@intel.com> Cc: Shaokun Zhang <zhangshaokun@hisilicon.com> Cc: Stefano Stabellini <sstabellini@kernel.org> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Cc: x86-ml <x86@kernel.org> Cc: xen-devel@lists.xenproject.org Cc: Yazen Ghannam <yazen.ghannam@amd.com> Cc: Yi Wang <wang.yi59@zte.com.cn> Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com> Link: https://lkml.kernel.org/r/20190414160146.089060584@linutronix.de
This commit is contained in:

committed by
Borislav Petkov

parent
451f743a64
commit
66c7ceb47f
@@ -107,28 +107,28 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate per-cpu stacks for hardirq and for softirq processing
|
||||
* Allocate per-cpu stacks for hardirq and softirq processing
|
||||
*/
|
||||
void irq_ctx_init(int cpu)
|
||||
int irq_init_percpu_irqstack(unsigned int cpu)
|
||||
{
|
||||
struct irq_stack *irqstk;
|
||||
int node = cpu_to_node(cpu);
|
||||
struct page *ph, *ps;
|
||||
|
||||
if (per_cpu(hardirq_stack_ptr, cpu))
|
||||
return;
|
||||
return 0;
|
||||
|
||||
irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
|
||||
THREADINFO_GFP,
|
||||
THREAD_SIZE_ORDER));
|
||||
per_cpu(hardirq_stack_ptr, cpu) = irqstk;
|
||||
ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
|
||||
if (!ph)
|
||||
return -ENOMEM;
|
||||
ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
|
||||
if (!ps) {
|
||||
__free_pages(ph, THREAD_SIZE_ORDER);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
|
||||
THREADINFO_GFP,
|
||||
THREAD_SIZE_ORDER));
|
||||
per_cpu(softirq_stack_ptr, cpu) = irqstk;
|
||||
|
||||
pr_debug("CPU %u irqstacks, hard=%p soft=%p\n",
|
||||
cpu, per_cpu(hardirq_stack_ptr, cpu),
|
||||
per_cpu(softirq_stack_ptr, cpu));
|
||||
per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
|
||||
per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void do_softirq_own_stack(void)
|
||||
|
@@ -91,7 +91,7 @@ void __init init_IRQ(void)
|
||||
for (i = 0; i < nr_legacy_irqs(); i++)
|
||||
per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
|
||||
|
||||
irq_ctx_init(smp_processor_id());
|
||||
BUG_ON(irq_init_percpu_irqstack(smp_processor_id()));
|
||||
|
||||
x86_init.irqs.intr_init();
|
||||
}
|
||||
|
@@ -935,20 +935,27 @@ out:
|
||||
return boot_error;
|
||||
}
|
||||
|
||||
void common_cpu_up(unsigned int cpu, struct task_struct *idle)
|
||||
int common_cpu_up(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Just in case we booted with a single CPU. */
|
||||
alternatives_enable_smp();
|
||||
|
||||
per_cpu(current_task, cpu) = idle;
|
||||
|
||||
/* Initialize the interrupt stack(s) */
|
||||
ret = irq_init_percpu_irqstack(cpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* Stack for startup_32 can be just as for start_secondary onwards */
|
||||
irq_ctx_init(cpu);
|
||||
per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
|
||||
#else
|
||||
initial_gs = per_cpu_offset(cpu);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1106,7 +1113,9 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
/* the FPU context is blank, nobody can own it */
|
||||
per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
|
||||
|
||||
common_cpu_up(cpu, tidle);
|
||||
err = common_cpu_up(cpu, tidle);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
|
||||
if (err) {
|
||||
|
Reference in New Issue
Block a user