/* trampoline_64.S */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 * Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 * 4 Jan 1997 Michael Chastain: changed to gnu as.
 * 15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 * Entry: CS:IP point to the start of our code, we are
 * in real mode with no stack, but the rest of the
 * trampoline page to make our stack and everything else
 * is a mystery.
 *
 * On entry to trampoline_start, the processor is in real mode
 * with 16-bit addressing and 16-bit data. CS has some value
 * and IP is zero. Thus, data addresses need to be absolute
 * (no relocation) and are taken with regard to r_base.
 *
 * With the addition of trampoline_level4_pgt this code can
 * now enter a 64bit kernel that lives at arbitrary 64bit
 * physical addresses.
 *
 * If you work on this file, check the object module with objdump
 * --full-contents --reloc to make sure there are no relocation
 * entries.
 */

#include <linux/linkage.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include <asm/realmode.h>

#include "realmode.h"
	.text
	.code16

	.balign	PAGE_SIZE
  38. SYM_CODE_START(trampoline_start)
  39. cli # We should be safe anyway
  40. wbinvd
  41. LJMPW_RM(1f)
  42. 1:
  43. mov %cs, %ax # Code and data in the same place
  44. mov %ax, %ds
  45. mov %ax, %es
  46. mov %ax, %ss
  47. # Setup stack
  48. movl $rm_stack_end, %esp
  49. call verify_cpu # Verify the cpu supports long mode
  50. testl %eax, %eax # Check for return code
  51. jnz no_longmode
  52. .Lswitch_to_protected:
  53. /*
  54. * GDT tables in non default location kernel can be beyond 16MB and
  55. * lgdt will not be able to load the address as in real mode default
  56. * operand size is 16bit. Use lgdtl instead to force operand size
  57. * to 32 bit.
  58. */
  59. lidtl tr_idt # load idt with 0, 0
  60. lgdtl tr_gdt # load gdt with whatever is appropriate
  61. movw $__KERNEL_DS, %dx # Data segment descriptor
  62. # Enable protected mode
  63. movl $(CR0_STATE & ~X86_CR0_PG), %eax
  64. movl %eax, %cr0 # into protected mode
  65. # flush prefetch and jump to startup_32
  66. ljmpl $__KERNEL32_CS, $pa_startup_32
  67. no_longmode:
  68. hlt
  69. jmp no_longmode
  70. SYM_CODE_END(trampoline_start)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* SEV-ES supports non-zero IP for entry points - no alignment needed */
  73. SYM_CODE_START(sev_es_trampoline_start)
  74. cli # We should be safe anyway
  75. LJMPW_RM(1f)
  76. 1:
  77. mov %cs, %ax # Code and data in the same place
  78. mov %ax, %ds
  79. mov %ax, %es
  80. mov %ax, %ss
  81. # Setup stack
  82. movl $rm_stack_end, %esp
  83. jmp .Lswitch_to_protected
  84. SYM_CODE_END(sev_es_trampoline_start)
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

#include "../kernel/verify_cpu.S"
	.section ".text32","ax"
	.code32
	.balign 4
  90. SYM_CODE_START(startup_32)
  91. movl %edx, %ss
  92. addl $pa_real_mode_base, %esp
  93. movl %edx, %ds
  94. movl %edx, %es
  95. movl %edx, %fs
  96. movl %edx, %gs
  97. /*
  98. * Check for memory encryption support. This is a safety net in
  99. * case BIOS hasn't done the necessary step of setting the bit in
  100. * the MSR for this AP. If SME is active and we've gotten this far
  101. * then it is safe for us to set the MSR bit and continue. If we
  102. * don't we'll eventually crash trying to execute encrypted
  103. * instructions.
  104. */
  105. btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
  106. jnc .Ldone
  107. movl $MSR_AMD64_SYSCFG, %ecx
  108. rdmsr
  109. bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
  110. jc .Ldone
  111. /*
  112. * Memory encryption is enabled but the SME enable bit for this
  113. * CPU has has not been set. It is safe to set it, so do so.
  114. */
  115. wrmsr
  116. .Ldone:
  117. movl pa_tr_cr4, %eax
  118. movl %eax, %cr4 # Enable PAE mode
  119. # Setup trampoline 4 level pagetables
  120. movl $pa_trampoline_pgd, %eax
  121. movl %eax, %cr3
  122. # Set up EFER
  123. movl $MSR_EFER, %ecx
  124. rdmsr
  125. /*
  126. * Skip writing to EFER if the register already has desired
  127. * value (to avoid #VE for the TDX guest).
  128. */
  129. cmp pa_tr_efer, %eax
  130. jne .Lwrite_efer
  131. cmp pa_tr_efer + 4, %edx
  132. je .Ldone_efer
  133. .Lwrite_efer:
  134. movl pa_tr_efer, %eax
  135. movl pa_tr_efer + 4, %edx
  136. wrmsr
  137. .Ldone_efer:
  138. # Enable paging and in turn activate Long Mode.
  139. movl $CR0_STATE, %eax
  140. movl %eax, %cr0
  141. /*
  142. * At this point we're in long mode but in 32bit compatibility mode
  143. * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
  144. * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
  145. * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
  146. */
  147. ljmpl $__KERNEL_CS, $pa_startup_64
  148. SYM_CODE_END(startup_32)
  149. SYM_CODE_START(pa_trampoline_compat)
  150. /*
  151. * In compatibility mode. Prep ESP and DX for startup_32, then disable
  152. * paging and complete the switch to legacy 32-bit mode.
  153. */
  154. movl $rm_stack_end, %esp
  155. movw $__KERNEL_DS, %dx
  156. movl $(CR0_STATE & ~X86_CR0_PG), %eax
  157. movl %eax, %cr0
  158. ljmpl $__KERNEL32_CS, $pa_startup_32
  159. SYM_CODE_END(pa_trampoline_compat)
	.section ".text64","ax"
	.code64
	.balign 4
  163. SYM_CODE_START(startup_64)
  164. # Now jump into the kernel using virtual addresses
  165. jmpq *tr_start(%rip)
  166. SYM_CODE_END(startup_64)
  167. SYM_CODE_START(trampoline_start64)
  168. /*
  169. * APs start here on a direct transfer from 64-bit BIOS with identity
  170. * mapped page tables. Load the kernel's GDT in order to gear down to
  171. * 32-bit mode (to handle 4-level vs. 5-level paging), and to (re)load
  172. * segment registers. Load the zero IDT so any fault triggers a
  173. * shutdown instead of jumping back into BIOS.
  174. */
  175. lidt tr_idt(%rip)
  176. lgdt tr_gdt64(%rip)
  177. ljmpl *tr_compat(%rip)
  178. SYM_CODE_END(trampoline_start64)
	.section ".rodata","a"

	# Duplicate the global descriptor table
	# so the kernel can live anywhere
	.balign	16
  183. SYM_DATA_START(tr_gdt)
  184. .short tr_gdt_end - tr_gdt - 1 # gdt limit
  185. .long pa_tr_gdt
  186. .short 0
  187. .quad 0x00cf9b000000ffff # __KERNEL32_CS
  188. .quad 0x00af9b000000ffff # __KERNEL_CS
  189. .quad 0x00cf93000000ffff # __KERNEL_DS
  190. SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)
  191. SYM_DATA_START(tr_gdt64)
  192. .short tr_gdt_end - tr_gdt - 1 # gdt limit
  193. .long pa_tr_gdt
  194. .long 0
  195. SYM_DATA_END(tr_gdt64)
  196. SYM_DATA_START(tr_compat)
  197. .long pa_trampoline_compat
  198. .short __KERNEL32_CS
  199. SYM_DATA_END(tr_compat)
  200. .bss
  201. .balign PAGE_SIZE
  202. SYM_DATA(trampoline_pgd, .space PAGE_SIZE)
  203. .balign 8
  204. SYM_DATA_START(trampoline_header)
  205. SYM_DATA_LOCAL(tr_start, .space 8)
  206. SYM_DATA(tr_efer, .space 8)
  207. SYM_DATA(tr_cr4, .space 4)
  208. SYM_DATA(tr_flags, .space 4)
  209. SYM_DATA_END(trampoline_header)
#include "trampoline_common.S"