xen-asm.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
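
/*
 * Everything up to the matching .popsection lives in .noinstr.text,
 * so these helpers can be called from regions where instrumentation
 * is not allowed.
 */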
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
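/*
 * check_events can be reached from paths that don't follow the C
 * calling convention, so every caller-clobbered register is saved
 * before the call and restored afterwards.
 */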
SYM_FUNC_START(check_events)
        FRAME_BEGIN
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
        FRAME_END
        RET
SYM_FUNC_END(check_events)

/*
 * Enable events. This clears the event mask and tests the pending
 * event status with a single "and" operation. If there are pending
 * events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
        FRAME_BEGIN
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

        /*
         * Being preempted here doesn't matter, because preemption will
         * itself deal with any pending interrupts. The pending check may
         * then end up running on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jz 1f

        call check_events
1:
        FRAME_END
        RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
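/*
 * How the bit trick below works: testb sets ZF when the event mask
 * byte is zero (events enabled). setz then yields %ah = 1 in that
 * case, and addb %ah, %ah doubles it into bit 1 of %ah, which is
 * bit 9 of %eax -- exactly X86_EFLAGS_IF.
 */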
SYM_FUNC_START(xen_save_fl_direct)
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        setz %ah
        addb %ah, %ah
        RET
SYM_FUNC_END(xen_save_fl_direct)
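
/*
 * Read the faulting address Xen saved for us in vcpu_info.arch.cr2.
 * xen_read_cr2 goes through the xen_vcpu pointer; the _direct
 * variant assumes vcpu_info itself sits in percpu data, per the
 * note in the file header.
 */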
SYM_FUNC_START(xen_read_cr2)
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
        _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
        FRAME_END
        RET
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
        FRAME_BEGIN
        _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
        FRAME_END
        RET
SYM_FUNC_END(xen_read_cr2_direct);
.popsection
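
/*
 * On exception entry from a pv guest, Xen pushes %rcx and %r11 on
 * top of the frame the native handlers expect. These stubs pop that
 * pair off before jumping to the corresponding native handler.
 */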
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
        UNWIND_HINT_ENTRY
        ENDBR
        pop %rcx
        pop %r11
        jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_KERNEL_IBT
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback
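
/*
 * Early IDT stubs: again pop the %rcx/%r11 pair Xen pushed, then
 * hand off to the matching early_idt_handler_array entry. The .fill
 * pads each stub with 0xcc (int3) out to XEN_EARLY_IDT_HANDLER_SIZE
 * bytes, so the array can be indexed by vector number.
 */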
__INIT
SYM_CODE_START(xen_early_idt_handler_array)
        i = 0
        .rept NUM_EXCEPTION_VECTORS
        UNWIND_HINT_EMPTY
        ENDBR
        pop %rcx
        pop %r11
        jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
        i = i + 1
        .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
SYM_CODE_END(xen_early_idt_handler_array)
__FINIT
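
/*
 * The hypercall page is an array of 32-byte stubs, one per
 * hypercall, so the iret hypercall sits at a fixed offset within it.
 */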
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32

/*
 * Xen64 iret frame:
 *
 *      ss
 *      rsp
 *      rflags
 *      cs
 *      rip             <-- standard iret frame
 *
 *      flags
 *
 *      rcx             }
 *      r11             }<-- pushed by hypercall page
 * rsp->rax             }
 */
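/*
 * The pushq $0 below supplies the "flags" word of this frame; zero
 * (i.e. VGCF_in_syscall not set) asks Xen to restore the full frame,
 * including %rcx and %r11, rather than take the syscall-return path.
 */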
SYM_CODE_START(xen_iret)
        UNWIND_HINT_EMPTY
        ANNOTATE_NOENDBR
        pushq $0
        jmp hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * XEN pv doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
 * under XEN pv would move %rsp up to the top of the kernel stack and leave
 * the IRET frame below %rsp, where it could be corrupted if an #NMI arrives.
 * And since swapgs_restore_regs_and_return_to_usermode() would push the IRET
 * frame at the same address anyway, nothing is gained by reusing it.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
        UNWIND_HINT_REGS
        POP_REGS

        /* stackleak_erase() can work safely on the kernel stack. */
        STACKLEAK_ERASE_NOCLOBBER

        addq $8, %rsp   /* skip regs->orig_ax */
        jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *      ss
 *      rsp
 *      rflags
 *      cs
 *      rip
 *      r11
 * rsp->rcx
 */
/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
        UNWIND_HINT_ENTRY
        ENDBR
        popq %rcx
        popq %r11

        /*
         * Neither Xen nor the kernel really knows what the old SS and
         * CS were. The kernel expects __USER_DS and __USER_CS, so
         * report those values even though Xen will guess its own values.
         */
        movq $__USER_DS, 4*8(%rsp)      /* regs->ss */
        movq $__USER_CS, 1*8(%rsp)      /* regs->cs */

        jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
        UNWIND_HINT_ENTRY
        ENDBR
        popq %rcx
        popq %r11

        /*
         * Neither Xen nor the kernel really knows what the old SS and
         * CS were. The kernel expects __USER32_DS and __USER32_CS, so
         * report those values even though Xen will guess its own values.
         */
        movq $__USER32_DS, 4*8(%rsp)    /* regs->ss */
        movq $__USER32_CS, 1*8(%rsp)    /* regs->cs */

        jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
        UNWIND_HINT_ENTRY
        ENDBR
        /*
         * NB: Xen is polite and clears TF from EFLAGS for us. This means
         * that we don't need to guard against single step exceptions here.
         */
        popq %rcx
        popq %r11

        /*
         * Neither Xen nor the kernel really knows what the old SS and
         * CS were. The kernel expects __USER32_DS and __USER32_CS, so
         * report those values even though Xen will guess its own values.
         */
        movq $__USER32_DS, 4*8(%rsp)    /* regs->ss */
        movq $__USER32_CS, 1*8(%rsp)    /* regs->cs */

        jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */
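
/*
 * Without IA32 emulation there is nothing to dispatch to: discard
 * the %rcx/%r11 pair Xen pushed, report -ENOSYS, and return to the
 * guest via the iret hypercall.
 */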
SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
        UNWIND_HINT_ENTRY
        ENDBR
        lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
        pushq $0
        jmp hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif /* CONFIG_IA32_EMULATION */