cpu_entry_area.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
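
/* Per-CPU backing store for each CPU's entry (trampoline) stack */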
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
#endif

#ifdef CONFIG_X86_32
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;

	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
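
/*
 * Install a single PTE in the cpu_entry_area, mapping the page at
 * physical address @pa at virtual address @cea_vaddr with protection
 * @flags.
 */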
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}
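
/*
 * Map @pages pages of per-CPU memory starting at @ptr into the
 * cpu_entry_area at @cea_vaddr, one page at a time.
 */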
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
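
/*
 * Map the per-CPU debug store (used by the PEBS/BTS performance
 * monitoring features of Intel CPUs) and pre-populate the PMDs
 * covering its not-yet-allocated buffers.
 */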
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

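/*
 * Map one named exception stack from the per-CPU backing store into
 * its slot in the cpu_entry_area. Relies on the local variables
 * estacks, cea and npages of the enclosing function.
 */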
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per cpu area are protected
	 * by guard pages, so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
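/*
 * 32-bit has no IST; the only exception stack that lives in the
 * cpu_entry_area is the double fault stack, entered via a task gate.
 */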
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Set up the cpu_entry_area mappings, once per CPU */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit.  This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss.  Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);
	percpu_setup_debug_store(cpu);
}
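
/*
 * 32-bit only: pre-allocate the page tables covering the whole
 * cpu_entry_area range, one PMD-sized step at a time, so that
 * cea_set_pte() can later fill in individual PTEs.
 */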
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}
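
/* Called once at boot to set up the cpu_entry_area for every possible CPU */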
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}