mem_encrypt_boot.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <[email protected]>
 */
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>

	.text
	.code64
SYM_FUNC_START(sme_encrypt_execute)
	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted mapping
	 *   RSI - virtual address for the decrypted mapping
	 *   RDX - length to encrypt
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */
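
	/*
	 * Workarea layout implied by the code below (offsets from RCX):
	 *   +0            stack page (RSP is pointed at RCX + PAGE_SIZE)
	 *   +PAGE_SIZE    destination for the copy of __enc_copy
	 *   +2*PAGE_SIZE  intermediate copy buffer (PMD_PAGE_SIZE bytes)
	 *
	 * A rough C-level view of the call, as a sketch only (the
	 * parameter names are illustrative, not the declared prototype):
	 *   sme_encrypt_execute(encrypted_vaddr, decrypted_vaddr, len,
	 *                       workarea_vaddr, pgtable_paddr);
	 */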
	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

	/* Set up a one-page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */

	push	%r12
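	/*
	 * Stash the arguments in scratch registers that the rep movsb
	 * below does not touch; rep movsb consumes RDI, RSI and RCX.
	 */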
	movq	%rdi, %r10		/* Encrypted area */
	movq	%rsi, %r11		/* Decrypted area */
	movq	%rdx, %r12		/* Area length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi		/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi	/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb
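	/*
	 * __enc_copy now resides in the workarea; RAX still holds the
	 * address of the copy and is used for the indirect call below.
	 */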
	/* Set up registers for the call */
	movq	%r10, %rdi		/* Encrypted area */
	movq	%r11, %rsi		/* Decrypted area */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Area length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

	ANNOTATE_RETPOLINE_SAFE
	call	*%rax			/* Call the encryption routine */

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	/* Offset to __x86_return_thunk would be wrong here */
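	/*
	 * With return thunks enabled, objtool expects returns to go
	 * through __x86_return_thunk and would flag a bare ret. This
	 * code runs in an early boot context where a link-time relative
	 * offset to the thunk cannot be relied upon, so ANNOTATE_UNRET_SAFE
	 * marks the ret as intentional; the int3 that follows serves as a
	 * barrier against straight-line speculation.
	 */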
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(sme_encrypt_execute)

SYM_FUNC_START(__enc_copy)
/*
 * Routine used to encrypt memory in place.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted mapping
 *     RSI - virtual address for the decrypted mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of the area
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The area will be encrypted by copying from the non-encrypted
 * memory space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted memory space. The physical
 * addresses of the two mappings are the same, which results in the area
 * being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3
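	/*
	 * The pagetables loaded here are expected to map the same
	 * physical range twice: once with the encryption bit set (the
	 * RDI mapping) and once without it (the RSI mapping).
	 */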
	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
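	/*
	 * Writing CR4 with PGE cleared invalidates all TLB entries,
	 * including global ones; setting PGE again re-enables global
	 * pages. No stale translations survive the CR3 switch above.
	 */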
	push	%r15
	push	%r12

	movq	%rcx, %r9		/* Save area length */
	movq	%rdi, %r10		/* Save encrypted area address */
	movq	%rsi, %r11		/* Save decrypted area address */

	/* Set the PAT register PA5 entry to write-protect */
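	/*
	 * The PAT MSR holds eight one-byte entries; PA5 is byte 5, which
	 * is bits 8-15 of EDX (the high half returned by rdmsr). The
	 * encoding 0x05 selects the write-protect (WP) memory type.
	 */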
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%rdx, %r15		/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr

	wbinvd				/* Invalidate any cache entries */
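	/*
	 * The copy loop accesses the same physical memory through two
	 * mappings with different encryption attributes; the wbinvd
	 * above writes back and invalidates the caches so no stale
	 * aliased lines remain.
	 */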
	/* Copy/encrypt up to 2MB at a time */
	movq	$PMD_PAGE_SIZE, %r12
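	/*
	 * Loop overview in C-like pseudocode (R9 = remaining length,
	 * R12 = chunk size):
	 *   while (len) {
	 *       chunk = min(len, PMD_PAGE_SIZE);
	 *       memcpy(buffer, decrypted, chunk);  // via decrypted mapping
	 *       memcpy(encrypted, buffer, chunk);  // via encrypted mapping
	 *       decrypted += chunk; encrypted += chunk; len -= chunk;
	 *   }
	 */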
1:
	cmpq	%r12, %r9
	jnb	2f
	movq	%r9, %r12

2:
	movq	%r11, %rsi		/* Source - decrypted area */
	movq	%r8, %rdi		/* Dest   - intermediate copy buffer */
	movq	%r12, %rcx
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest   - encrypted area */
	movq	%r12, %rcx
	rep	movsb

	addq	%r12, %r11
	addq	%r12, %r10
	subq	%r12, %r9		/* Area length decrement */
	jnz	1b			/* Area length not zero? */
	/* Restore PAT register */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%r15, %rdx		/* Restore original PAT value */
	wrmsr

	pop	%r12
	pop	%r15

	/* Offset to __x86_return_thunk would be wrong here */
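	/*
	 * Here the concern is concrete: __enc_copy executes from its
	 * copy in the workarea, so a relative jump to __x86_return_thunk
	 * resolved at link time would land at the wrong address.
	 * ANNOTATE_UNRET_SAFE permits the bare ret; int3 again guards
	 * against straight-line speculation.
	 */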
	ANNOTATE_UNRET_SAFE
	ret
	int3
.L__enc_copy_end:
SYM_FUNC_END(__enc_copy)