vsgx.S

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"

/* Relative to %rbp. */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16

/* The offsets relative to struct sgx_enclave_run. */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256
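
/*
 * For reference: these offsets mirror the layout of struct sgx_enclave_run
 * from arch/x86/include/uapi/asm/sgx.h. A sketch of that struct (the uapi
 * header is authoritative):
 *
 *	struct sgx_enclave_run {
 *		__u64 tcs;			// offset  0
 *		__u32 function;			// offset  8, the ENCLU leaf
 *		__u16 exception_vector;		// offset 12
 *		__u16 exception_error_code;	// offset 14
 *		__u64 exception_addr;		// offset 16
 *		__u64 user_handler;		// offset 24
 *		__u64 user_data;		// offset 32
 *		__u8  reserved[216];		// offset 40, ends at 256
 *	};
 */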

.code64
.section .text, "ax"
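
/*
 * __vdso_sgx_enter_enclave() - Enter an SGX enclave via EENTER/ERESUME.
 *
 * A sketch of the C-callable prototype implied by the code below (see the
 * typedef in arch/x86/include/uapi/asm/sgx.h for the authoritative
 * declaration):
 *
 *	int __vdso_sgx_enter_enclave(unsigned long rdi, unsigned long rsi,
 *				     unsigned long rdx, unsigned int function,
 *				     unsigned long r8,  unsigned long r9,
 *				     struct sgx_enclave_run *run);
 *
 * %rdi, %rsi, %rdx, %r8 and %r9 pass through to the enclave untouched;
 * @function (%ecx) is the requested ENCLU leaf; @run is the seventh
 * argument and is fetched from the stack at 16(%rbp) below.
 *
 * Returns 0 once ENCLU has been attempted, -EINVAL on invalid input, or
 * the (non-positive) return value of the user exit handler, if one runs.
 */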
SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx
	.cfi_rel_offset		%rbx, -8

	mov	%ecx, %eax
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/* Validate that the reserved area contains only zeros. */
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input

	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b
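
	/*
	 * Rejecting a non-zero reserved area presumably keeps the door open
	 * for future kernels to add fields to struct sgx_enclave_run without
	 * silently misreading stale user data. The loop above checks eight
	 * bytes per iteration, which works out because the reserved area
	 * (216 bytes) is a multiple of eight.
	 */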

	/* Load TCS and AEP */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx

	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu
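
	/*
	 * If ENCLU faults, or an exception in the enclave triggers an
	 * asynchronous exit to the AEP above, the kernel's vDSO fixup
	 * (registered via _ASM_VDSO_EXTABLE_HANDLE at the bottom of this
	 * file) redirects execution to .Lhandle_exception instead of
	 * delivering a signal, and is expected to supply the exception
	 * vector, error code and fault address in %di, %si and %rdx.
	 */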

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set exit_reason. */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	RET

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

.Lhandle_exception:
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit
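
/*
 * A sketch of the exit handler's prototype, per the uapi typedef (see
 * arch/x86/include/uapi/asm/sgx.h for the authoritative declaration):
 *
 *	typedef int (*sgx_enclave_user_handler_t)(long rdi, long rsi,
 *						  long rdx, long rsp,
 *						  long r8, long r9,
 *						  struct sgx_enclave_run *run);
 *
 * rdi/rsi/rdx/r8/r9 hold the register values at enclave exit, rsp is the
 * untrusted stack pointer passed via %rcx below, and run is pushed on the
 * stack as the seventh argument.
 */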
.Linvoke_userspace_handler:
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the struct sgx_enclave_run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax

	/* Push the struct sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/*
	 * Undo the post-exit %rsp adjustment: 0x10 compensates for the two
	 * pushes above, and %rbx adds back the 0-15 alignment bytes removed
	 * by the earlier "and $-0x10, %rsp".
	 */
	lea	0x10(%rsp, %rbx), %rsp

	/*
	 * If the return from callback is zero or negative, return immediately,
	 * else re-execute ENCLU with the positive return value interpreted as
	 * the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc
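
/*
 * Register the ENCLU above in the vDSO exception fixup table: a fault at
 * .Lenclu_eenter_eresume is redirected to .Lhandle_exception rather than
 * raising a signal, so the exception can be reported through
 * struct sgx_enclave_run.
 */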
_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)