  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * AMD Memory Encryption Support
  4. *
  5. * Copyright (C) 2016 Advanced Micro Devices, Inc.
  6. *
  7. * Author: Tom Lendacky <[email protected]>
  8. */
  9. #define DISABLE_BRANCH_PROFILING
  10. /*
  11. * Since we're dealing with identity mappings, physical and virtual
  12. * addresses are the same, so override these defines which are ultimately
  13. * used by the headers in misc.h.
  14. */
  15. #define __pa(x) ((unsigned long)(x))
  16. #define __va(x) ((void *)((unsigned long)(x)))
  17. /*
  18. * Special hack: we have to be careful, because no indirections are
  19. * allowed here, and paravirt_ops is a kind of one. As it will only run in
  20. * baremetal anyway, we just keep it from happening. (This list needs to
  21. * be extended when new paravirt and debugging variants are added.)
  22. */
  23. #undef CONFIG_PARAVIRT
  24. #undef CONFIG_PARAVIRT_XXL
  25. #undef CONFIG_PARAVIRT_SPINLOCKS
  26. /*
  27. * This code runs before CPU feature bits are set. By default, the
  28. * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
  29. * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
  30. * is provided to handle this situation and, instead, use a variable that
  31. * has been set by the early boot code.
  32. */
  33. #define USE_EARLY_PGTABLE_L5
  34. #include <linux/kernel.h>
  35. #include <linux/mm.h>
  36. #include <linux/mem_encrypt.h>
  37. #include <linux/cc_platform.h>
  38. #include <asm/setup.h>
  39. #include <asm/sections.h>
  40. #include <asm/cmdline.h>
  41. #include <asm/coco.h>
  42. #include <asm/sev.h>
  43. #include "mm_internal.h"
/* Intermediate pagetable levels always use the non-encrypted table flags */
#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

/*
 * 2MB leaf mappings. _PAGE_GLOBAL is cleared so that a plain CR3 write is
 * sufficient to flush these translations (see the TLB flushes in
 * sme_encrypt_kernel()).
 */
#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
/* Decrypted + write-protected: PAT/PWT select a non-cached memory type */
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

/* 4KB leaf mappings, mirroring the PMD variants above */
#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
/*
 * State shared by the sme_populate_pgd*() / __sme_map_range*() helpers
 * while the temporary encrypted/decrypted pagetables are being built.
 */
struct sme_populate_pgd_data {
	void	*pgtable_area;	/* next free byte for new pagetable pages */
	pgd_t	*pgd;		/* root of the pagetable being populated */

	pmdval_t pmd_flags;	/* flags applied to 2MB PMD leaf entries */
	pteval_t pte_flags;	/* flags applied to 4KB PTE leaf entries */
	unsigned long paddr;	/* current physical address being mapped */

	unsigned long vaddr;	/* current virtual address being mapped */
	unsigned long vaddr_end;	/* end of the range (exclusive) */
};
/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them. This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");

/* Command line option name and its recognized values, parsed in sme_enable() */
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
  82. static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
  83. {
  84. unsigned long pgd_start, pgd_end, pgd_size;
  85. pgd_t *pgd_p;
  86. pgd_start = ppd->vaddr & PGDIR_MASK;
  87. pgd_end = ppd->vaddr_end & PGDIR_MASK;
  88. pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
  89. pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
  90. memset(pgd_p, 0, pgd_size);
  91. }
/*
 * Walk (and populate as needed) the upper pagetable levels for ppd->vaddr,
 * carving any newly required P4D/PUD/PMD pages out of ppd->pgtable_area.
 *
 * Returns the PUD entry covering the address, or NULL if that entry is
 * already a large mapping and cannot be descended into.
 */
static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		/* Allocate and zero a new P4D page from the work area */
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		/* Allocate and zero a new PUD page from the work area */
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		/* Allocate and zero a new PMD page from the work area */
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	/* An existing large PUD mapping already covers this address */
	if (pud_large(*pud))
		return NULL;

	return pud;
}
/*
 * Install a 2MB PMD mapping of ppd->paddr at ppd->vaddr using
 * ppd->pmd_flags. An already-present large mapping is left untouched.
 */
static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_large(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}
  135. static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
  136. {
  137. pud_t *pud;
  138. pmd_t *pmd;
  139. pte_t *pte;
  140. pud = sme_prepare_pgd(ppd);
  141. if (!pud)
  142. return;
  143. pmd = pmd_offset(pud, ppd->vaddr);
  144. if (pmd_none(*pmd)) {
  145. pte = ppd->pgtable_area;
  146. memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
  147. ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
  148. set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
  149. }
  150. if (pmd_large(*pmd))
  151. return;
  152. pte = pte_offset_map(pmd, ppd->vaddr);
  153. if (pte_none(*pte))
  154. set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
  155. }
  156. static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
  157. {
  158. while (ppd->vaddr < ppd->vaddr_end) {
  159. sme_populate_pgd_large(ppd);
  160. ppd->vaddr += PMD_PAGE_SIZE;
  161. ppd->paddr += PMD_PAGE_SIZE;
  162. }
  163. }
  164. static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
  165. {
  166. while (ppd->vaddr < ppd->vaddr_end) {
  167. sme_populate_pgd(ppd);
  168. ppd->vaddr += PAGE_SIZE;
  169. ppd->paddr += PAGE_SIZE;
  170. }
  171. }
/*
 * Map [ppd->vaddr, ppd->vaddr_end) to ppd->paddr using the given flags.
 * The 2MB-aligned middle of the range is mapped with PMD entries; any
 * unaligned head and tail are mapped with 4KB PTE entries.
 */
static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}
/* Map the range as encrypted (leaf entries carry _PAGE_ENC) */
static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}
/* Map the range as decrypted (no _PAGE_ENC on the leaf entries) */
static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}
/* Map the range as decrypted and write-protected (PAT/PWT set on leaves) */
static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
  202. static unsigned long __init sme_pgtable_calc(unsigned long len)
  203. {
  204. unsigned long entries = 0, tables = 0;
  205. /*
  206. * Perform a relatively simplistic calculation of the pagetable
  207. * entries that are needed. Those mappings will be covered mostly
  208. * by 2MB PMD entries so we can conservatively calculate the required
  209. * number of P4D, PUD and PMD structures needed to perform the
  210. * mappings. For mappings that are not 2MB aligned, PTE mappings
  211. * would be needed for the start and end portion of the address range
  212. * that fall outside of the 2MB alignment. This results in, at most,
  213. * two extra pages to hold PTE entries for each range that is mapped.
  214. * Incrementing the count for each covers the case where the addresses
  215. * cross entries.
  216. */
  217. /* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
  218. if (PTRS_PER_P4D > 1)
  219. entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
  220. entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
  221. entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
  222. entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
  223. /*
  224. * Now calculate the added pagetable structures needed to populate
  225. * the new pagetables.
  226. */
  227. if (PTRS_PER_P4D > 1)
  228. tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
  229. tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
  230. tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
  231. return entries + tables;
  232. }
/*
 * Encrypt the kernel image (and the initrd, if present) in place when SME
 * is active. Builds two sets of temporary mappings - one encrypted
 * (identity) and one decrypted/write-protected (offset by decrypted_base) -
 * over the kernel, initrd and work area, then calls sme_encrypt_execute()
 * to copy the data through the decrypted view back into the encrypted view.
 */
void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	/*
	 * This is early code, use an open coded check for SME instead of
	 * using cc_platform_has(). This eliminates worries about removing
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 * One range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as encrypted.
	 *
	 * Another range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as decrypted and write-protected.
	 *
	 * The use of write-protect attribute will prevent any of the
	 * memory from being cached.
	 */

	/* Physical addresses gives us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	/* Assemble 64-bit initrd size/address from the split boot_params fields */
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * We're running identity mapped, so we must obtain the address to the
	 * SME encryption workarea using rip-relative addressing.
	 */
	asm ("lea sme_workarea(%%rip), %0"
	     : "=r" (workarea_start)
	     : "p" (sme_workarea));

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		/* The initrd may end in a higher PGD entry than the workarea */
		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}
/*
 * Detect and enable SME/SEV very early in boot.
 *
 * Probes the AMD memory-encryption CPUID leaf and the SEV/SYSCFG MSRs to
 * decide whether memory encryption is active, then sets sme_me_mask and the
 * confidential-computing vendor/mask accordingly. For bare-metal SME the
 * "mem_encrypt=" command line option can enable/disable the feature; SEV
 * state is dictated by the hypervisor and cannot be overridden.
 */
void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	bool snp;
	u64 msr;

	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	me_mask = 1UL << (ebx & 0x3f);

	/* Check the SEV MSR whether SEV or SME is enabled */
	sev_status = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/*
		 * No SME if Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a
		 * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
		 * might be other hypervisors which emulate that MSR as non-zero
		 * or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		goto out;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	/* Assemble the 64-bit command line pointer from the split fields */
	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	/* No "mem_encrypt=" option present: leave sme_me_mask at its default */
	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
		return;

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;
out:
	if (sme_me_mask) {
		/* Encryption bit is not part of the usable physical address space */
		physical_mask &= ~sme_me_mask;
		cc_set_vendor(CC_VENDOR_AMD);
		cc_set_mask(sme_me_mask);
	}
}