irq_64.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu_entry_area.h>
#include <asm/softirq_stack.h>
#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
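
/*
 * Page-aligned per-CPU backing store for the hardirq stacks. Under
 * CONFIG_VMAP_STACK it is remapped into vmalloc space below, so that
 * each stack ends up bounded by guard pages.
 */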
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
DECLARE_INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_VMAP_STACK
/*
 * VMAP the backing store with guard pages
 */
static int map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;
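
	/*
	 * Resolve each page of the per-CPU backing store to its struct
	 * page so the range can be handed to vmap(). per_cpu_ptr_to_phys()
	 * does the translation because a per-CPU address may not be part
	 * of the kernel's linear mapping.
	 */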
	for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}
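
	/*
	 * Map the pages contiguously in vmalloc space. Mappings there are
	 * separated by guard holes, so an IRQ stack overflow faults on an
	 * unmapped page instead of silently corrupting neighboring data.
	 */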
	va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;

	/* Store actual TOS to avoid adjustment in the hotpath */
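	/*
	 * The -8 reserves the topmost slot of the stack: the entry code in
	 * asm/irq_stack.h saves the previous stack pointer there when it
	 * switches onto the IRQ stack.
	 */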
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
	return 0;
}
#else
/*
 * If VMAP stacks are disabled due to KASAN, just use the per cpu
 * backing store without guard pages.
 */
static int map_irq_stack(unsigned int cpu)
{
	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);

	/* Store actual TOS to avoid adjustment in the hotpath */
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
	return 0;
}
#endif
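
/*
 * Set up the IRQ stack for a CPU that does not have one yet. The early
 * return makes this idempotent: a CPU that is offlined and brought back
 * online keeps its existing mapping instead of being remapped.
 */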
int irq_init_percpu_irqstack(unsigned int cpu)
{
	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;
	return map_irq_stack(cpu);
}