kexec.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif
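
/*
 * These indices name the slots of the page_list[] array that
 * machine_kexec() hands to relocate_kernel(). A sketch of how the
 * 32-bit path fills it in, simplified from
 * arch/x86/kernel/machine_kexec_32.c (details vary by kernel version):
 *
 *	unsigned long page_list[PAGES_NR];
 *	void *control_page = page_address(image->control_code_page);
 *
 *	page_list[PA_CONTROL_PAGE] = __pa(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *	page_list[PA_PGD] = __pa(image->arch.pgd);
 *	if (image->type == KEXEC_TYPE_DEFAULT)
 *		page_list[PA_SWAP_PAGE] = page_to_pfn(image->swap_page)
 *						<< PAGE_SHIFT;
 */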

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory, so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
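
/*
 * KEXEC_CONTROL_PAGE_SIZE sizes the control-page allocation that the
 * generic kexec core makes on behalf of this architecture. A sketch of
 * the call site, simplified from kimage_alloc_init() in
 * kernel/kexec_core.c:
 *
 *	image->control_code_page = kimage_alloc_control_pages(image,
 *				get_order(KEXEC_CONTROL_PAGE_SIZE));
 *	if (!image->control_code_page)
 *		return -ENOMEM;
 */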

/*
 * This function captures the register state at crash time: if we arrive
 * via panic (no saved pt_regs), it snapshots the live registers;
 * otherwise, coming via a kernel mode exception, it copies the state the
 * exception handler already saved.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
		newregs->ip = _THIS_IP_;
	}
}
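
/*
 * Typical caller: the crash path snapshots registers before shutting
 * down the other CPUs and jumping into the crash kernel. A sketch,
 * simplified from __crash_kexec() in kernel/kexec_core.c:
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 *	machine_kexec(kexec_crash_image);
 */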

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int host_mem_enc_active);
#endif
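
/*
 * A sketch of the 64-bit call site, simplified from machine_kexec() in
 * arch/x86/kernel/machine_kexec_64.c; page_list is the array indexed by
 * the PA_CONTROL_PAGE etc. slots defined at the top of this header:
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				(unsigned long)page_list,
 *				image->start,
 *				image->preserve_context,
 *				cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
 */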

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif /* CONFIG_X86_32 */
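
/*
 * These per-image page-table pages back the identity-mapped trampoline
 * that relocate_kernel() runs on; they are allocated at load time and
 * released on teardown. A sketch of the 64-bit cleanup, simplified from
 * free_transition_pgtable() in arch/x86/kernel/machine_kexec_64.c:
 *
 *	free_page((unsigned long)image->arch.p4d);
 *	image->arch.p4d = NULL;
 *	free_page((unsigned long)image->arch.pud);
 *	image->arch.pud = NULL;
 *	free_page((unsigned long)image->arch.pmd);
 *	image->arch.pmd = NULL;
 *	free_page((unsigned long)image->arch.pte);
 *	image->arch.pte = NULL;
 */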

#ifdef CONFIG_X86_64
/*
 * The number and order of elements in this structure must match those in
 * arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
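
/*
 * A sketch of how a kexec_file loader hands this register block to
 * purgatory, simplified from bzImage64_load() in
 * arch/x86/kernel/kexec-bzimage64.c (bootparam_load_addr,
 * kernel_load_addr and stack_addr stand in for the loader's real
 * bookkeeping):
 *
 *	struct kexec_entry64_regs regs64;
 *
 *	memset(&regs64, 0, sizeof(regs64));
 *	regs64.rbx = 0;
 *	regs64.rsi = bootparam_load_addr;
 *	regs64.rip = kernel_load_addr + 0x200;
 *	regs64.rsp = stack_addr;
 *	ret = kexec_purgatory_get_set_symbol(image, "entry64_regs",
 *					     &regs64, sizeof(regs64), 0);
 */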

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
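
/*
 * The core kexec code brackets crash-kernel (re)loading with these two
 * hooks so that the crash kernel's memory stays read-only while armed.
 * A sketch, simplified from do_kexec_load() in kernel/kexec.c:
 *
 *	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
 *		arch_kexec_unprotect_crashkres();
 *
 *	(load the segments and swap in the new image)
 *
 *	if (flags & KEXEC_ON_CRASH)
 *		arch_kexec_protect_crashkres();
 */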

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add

void *arch_kexec_kernel_image_load(struct kimage *image);
#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif

extern void kdump_nmi_shootdown_cpus(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */