sev.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * AMD Memory Encryption Support
  4. *
  5. * Copyright (C) 2019 SUSE
  6. *
  7. * Author: Joerg Roedel <[email protected]>
  8. */
  9. #define pr_fmt(fmt) "SEV: " fmt
  10. #include <linux/sched/debug.h> /* For show_regs() */
  11. #include <linux/percpu-defs.h>
  12. #include <linux/cc_platform.h>
  13. #include <linux/printk.h>
  14. #include <linux/mm_types.h>
  15. #include <linux/set_memory.h>
  16. #include <linux/memblock.h>
  17. #include <linux/kernel.h>
  18. #include <linux/mm.h>
  19. #include <linux/cpumask.h>
  20. #include <linux/efi.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/io.h>
  23. #include <linux/psp-sev.h>
  24. #include <uapi/linux/sev-guest.h>
  25. #include <asm/cpu_entry_area.h>
  26. #include <asm/stacktrace.h>
  27. #include <asm/sev.h>
  28. #include <asm/insn-eval.h>
  29. #include <asm/fpu/xcr.h>
  30. #include <asm/processor.h>
  31. #include <asm/realmode.h>
  32. #include <asm/setup.h>
  33. #include <asm/traps.h>
  34. #include <asm/svm.h>
  35. #include <asm/smp.h>
  36. #include <asm/cpu.h>
  37. #include <asm/apic.h>
  38. #include <asm/cpuid.h>
  39. #include <asm/cmdline.h>
  40. #define DR7_RESET_VALUE 0x400
  41. /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
  42. #define AP_INIT_CS_LIMIT 0xffff
  43. #define AP_INIT_DS_LIMIT 0xffff
  44. #define AP_INIT_LDTR_LIMIT 0xffff
  45. #define AP_INIT_GDTR_LIMIT 0xffff
  46. #define AP_INIT_IDTR_LIMIT 0xffff
  47. #define AP_INIT_TR_LIMIT 0xffff
  48. #define AP_INIT_RFLAGS_DEFAULT 0x2
  49. #define AP_INIT_DR6_DEFAULT 0xffff0ff0
  50. #define AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL
  51. #define AP_INIT_XCR0_DEFAULT 0x1
  52. #define AP_INIT_X87_FTW_DEFAULT 0x5555
  53. #define AP_INIT_X87_FCW_DEFAULT 0x0040
  54. #define AP_INIT_CR0_DEFAULT 0x60000010
  55. #define AP_INIT_MXCSR_DEFAULT 0x1f80
  56. /* For early boot hypervisor communication in SEV-ES enabled guests */
  57. static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
  58. /*
  59. * Needs to be in the .data section because we need it NULL before bss is
  60. * cleared
  61. */
  62. static struct ghcb *boot_ghcb __section(".data");
  63. /* Bitmap of SEV features supported by the hypervisor */
  64. static u64 sev_hv_features __ro_after_init;
  65. /* #VC handler runtime per-CPU data */
  66. struct sev_es_runtime_data {
  67. struct ghcb ghcb_page;
  68. /*
  69. * Reserve one page per CPU as backup storage for the unencrypted GHCB.
  70. * It is needed when an NMI happens while the #VC handler uses the real
  71. * GHCB, and the NMI handler itself is causing another #VC exception. In
  72. * that case the GHCB content of the first handler needs to be backed up
  73. * and restored.
  74. */
  75. struct ghcb backup_ghcb;
  76. /*
  77. * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
  78. * There is no need for it to be atomic, because nothing is written to
  79. * the GHCB between the read and the write of ghcb_active. So it is safe
  80. * to use it when a nested #VC exception happens before the write.
  81. *
  82. * This is necessary for example in the #VC->NMI->#VC case when the NMI
  83. * happens while the first #VC handler uses the GHCB. When the NMI code
  84. * raises a second #VC exception it might overwrite the contents of the
  85. * GHCB written by the first handler. To avoid this the content of the
  86. * GHCB is saved and restored when the GHCB is detected to be in use
  87. * already.
  88. */
  89. bool ghcb_active;
  90. bool backup_ghcb_active;
  91. /*
  92. * Cached DR7 value - write it on DR7 writes and return it on reads.
  93. * That value will never make it to the real hardware DR7 as debugging
  94. * is currently unsupported in SEV-ES guests.
  95. */
  96. unsigned long dr7;
  97. };
  98. struct ghcb_state {
  99. struct ghcb *ghcb;
  100. };
  101. static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
  102. DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
  103. static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
  104. struct sev_config {
  105. __u64 debug : 1,
  106. __reserved : 63;
  107. };
  108. static struct sev_config sev_cfg __read_mostly;
  109. static __always_inline bool on_vc_stack(struct pt_regs *regs)
  110. {
  111. unsigned long sp = regs->sp;
  112. /* User-mode RSP is not trusted */
  113. if (user_mode(regs))
  114. return false;
  115. /* SYSCALL gap still has user-mode RSP */
  116. if (ip_within_syscall_gap(regs))
  117. return false;
  118. return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
  119. }
  120. /*
  121. * This function handles the case when an NMI is raised in the #VC
  122. * exception handler entry code, before the #VC handler has switched off
  123. * its IST stack. In this case, the IST entry for #VC must be adjusted,
  124. * so that any nested #VC exception will not overwrite the stack
  125. * contents of the interrupted #VC handler.
  126. *
  127. * The IST entry is adjusted unconditionally so that it can also be
  128. * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
  129. * nested sev_es_ist_exit() call may adjust back the IST entry too
  130. * early.
  131. *
  132. * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
  133. * on the NMI IST stack, as they are only called from NMI handling code
  134. * right now.
  135. */
  136. void noinstr __sev_es_ist_enter(struct pt_regs *regs)
  137. {
  138. unsigned long old_ist, new_ist;
  139. /* Read old IST entry */
  140. new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
  141. /*
  142. * If NMI happened while on the #VC IST stack, set the new IST
  143. * value below regs->sp, so that the interrupted stack frame is
  144. * not overwritten by subsequent #VC exceptions.
  145. */
  146. if (on_vc_stack(regs))
  147. new_ist = regs->sp;
  148. /*
  149. * Reserve additional 8 bytes and store old IST value so this
  150. * adjustment can be unrolled in __sev_es_ist_exit().
  151. */
  152. new_ist -= sizeof(old_ist);
  153. *(unsigned long *)new_ist = old_ist;
  154. /* Set new IST entry */
  155. this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
  156. }
  157. void noinstr __sev_es_ist_exit(void)
  158. {
  159. unsigned long ist;
  160. /* Read IST entry */
  161. ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
  162. if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
  163. return;
  164. /* Read back old IST entry and write it to the TSS */
  165. this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
  166. }
  167. /*
  168. * Nothing shall interrupt this code path while holding the per-CPU
  169. * GHCB. The backup GHCB is only for NMIs interrupting this path.
  170. *
  171. * Callers must disable local interrupts around it.
  172. */
  173. static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
  174. {
  175. struct sev_es_runtime_data *data;
  176. struct ghcb *ghcb;
  177. WARN_ON(!irqs_disabled());
  178. data = this_cpu_read(runtime_data);
  179. ghcb = &data->ghcb_page;
  180. if (unlikely(data->ghcb_active)) {
  181. /* GHCB is already in use - save its contents */
  182. if (unlikely(data->backup_ghcb_active)) {
  183. /*
  184. * Backup-GHCB is also already in use. There is no way
  185. * to continue here so just kill the machine. To make
  186. * panic() work, mark GHCBs inactive so that messages
  187. * can be printed out.
  188. */
  189. data->ghcb_active = false;
  190. data->backup_ghcb_active = false;
  191. instrumentation_begin();
  192. panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
  193. instrumentation_end();
  194. }
  195. /* Mark backup_ghcb active before writing to it */
  196. data->backup_ghcb_active = true;
  197. state->ghcb = &data->backup_ghcb;
  198. /* Backup GHCB content */
  199. *state->ghcb = *ghcb;
  200. } else {
  201. state->ghcb = NULL;
  202. data->ghcb_active = true;
  203. }
  204. return ghcb;
  205. }
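/*
 * Raw accessors for the GHCB MSR (MSR_AMD64_SEV_ES_GHCB). The MSR is used
 * both for the GHCB MSR protocol and to tell the hypervisor the physical
 * address of the GHCB page before a VMGEXIT.
 */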
  206. static inline u64 sev_es_rd_ghcb_msr(void)
  207. {
  208. return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
  209. }
  210. static __always_inline void sev_es_wr_ghcb_msr(u64 val)
  211. {
  212. u32 low, high;
  213. low = (u32)(val);
  214. high = (u32)(val >> 32);
  215. native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
  216. }
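/*
 * Fetch and decode the instruction that caused the #VC exception. User and
 * kernel RIPs need different fetch helpers; a failed fetch is reported as an
 * exception, a failed decode as ES_DECODE_FAILED.
 */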
  217. static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
  218. unsigned char *buffer)
  219. {
  220. return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
  221. }
  222. static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
  223. {
  224. char buffer[MAX_INSN_SIZE];
  225. int insn_bytes;
  226. insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
  227. if (insn_bytes == 0) {
  228. /* Nothing could be copied */
  229. ctxt->fi.vector = X86_TRAP_PF;
  230. ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
  231. ctxt->fi.cr2 = ctxt->regs->ip;
  232. return ES_EXCEPTION;
  233. } else if (insn_bytes == -EINVAL) {
  234. /* Effective RIP could not be calculated */
  235. ctxt->fi.vector = X86_TRAP_GP;
  236. ctxt->fi.error_code = 0;
  237. ctxt->fi.cr2 = 0;
  238. return ES_EXCEPTION;
  239. }
  240. if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
  241. return ES_DECODE_FAILED;
  242. if (ctxt->insn.immediate.got)
  243. return ES_OK;
  244. else
  245. return ES_DECODE_FAILED;
  246. }
  247. static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
  248. {
  249. char buffer[MAX_INSN_SIZE];
  250. int res, ret;
  251. res = vc_fetch_insn_kernel(ctxt, buffer);
  252. if (res) {
  253. ctxt->fi.vector = X86_TRAP_PF;
  254. ctxt->fi.error_code = X86_PF_INSTR;
  255. ctxt->fi.cr2 = ctxt->regs->ip;
  256. return ES_EXCEPTION;
  257. }
  258. ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
  259. if (ret < 0)
  260. return ES_DECODE_FAILED;
  261. else
  262. return ES_OK;
  263. }
  264. static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
  265. {
  266. if (user_mode(ctxt->regs))
  267. return __vc_decode_user_insn(ctxt);
  268. else
  269. return __vc_decode_kern_insn(ctxt);
  270. }
  271. static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
  272. char *dst, char *buf, size_t size)
  273. {
  274. unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
  275. /*
  276. * This function uses __put_user() independent of whether kernel or user
  277. * memory is accessed. This works fine because __put_user() does no
  278. * sanity checks of the pointer being accessed. All that it does is
  279. * to report when the access failed.
  280. *
  281. * Also, this function runs in atomic context, so __put_user() is not
  282. * allowed to sleep. The page-fault handler detects that it is running
  283. * in atomic context and will not try to take mmap_sem and handle the
  284. * fault, so additional pagefault_enable()/disable() calls are not
  285. * needed.
  286. *
  287. * The access can't be done via copy_to_user() here because
  288. * vc_write_mem() must not use string instructions to access unsafe
  289. * memory. The reason is that MOVS is emulated by the #VC handler by
  290. * splitting the move up into a read and a write and taking a nested #VC
  291. * exception on whatever of them is the MMIO access. Using string
  292. * instructions here would cause infinite nesting.
  293. */
  294. switch (size) {
  295. case 1: {
  296. u8 d1;
  297. u8 __user *target = (u8 __user *)dst;
  298. memcpy(&d1, buf, 1);
  299. if (__put_user(d1, target))
  300. goto fault;
  301. break;
  302. }
  303. case 2: {
  304. u16 d2;
  305. u16 __user *target = (u16 __user *)dst;
  306. memcpy(&d2, buf, 2);
  307. if (__put_user(d2, target))
  308. goto fault;
  309. break;
  310. }
  311. case 4: {
  312. u32 d4;
  313. u32 __user *target = (u32 __user *)dst;
  314. memcpy(&d4, buf, 4);
  315. if (__put_user(d4, target))
  316. goto fault;
  317. break;
  318. }
  319. case 8: {
  320. u64 d8;
  321. u64 __user *target = (u64 __user *)dst;
  322. memcpy(&d8, buf, 8);
  323. if (__put_user(d8, target))
  324. goto fault;
  325. break;
  326. }
  327. default:
  328. WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
  329. return ES_UNSUPPORTED;
  330. }
  331. return ES_OK;
  332. fault:
  333. if (user_mode(ctxt->regs))
  334. error_code |= X86_PF_USER;
  335. ctxt->fi.vector = X86_TRAP_PF;
  336. ctxt->fi.error_code = error_code;
  337. ctxt->fi.cr2 = (unsigned long)dst;
  338. return ES_EXCEPTION;
  339. }
  340. static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
  341. char *src, char *buf, size_t size)
  342. {
  343. unsigned long error_code = X86_PF_PROT;
  344. /*
  345. * This function uses __get_user() independent of whether kernel or user
  346. * memory is accessed. This works fine because __get_user() does no
  347. * sanity checks of the pointer being accessed. All that it does is
  348. * to report when the access failed.
  349. *
  350. * Also, this function runs in atomic context, so __get_user() is not
  351. * allowed to sleep. The page-fault handler detects that it is running
  352. * in atomic context and will not try to take mmap_sem and handle the
  353. * fault, so additional pagefault_enable()/disable() calls are not
  354. * needed.
  355. *
  356. * The access can't be done via copy_from_user() here because
  357. * vc_read_mem() must not use string instructions to access unsafe
  358. * memory. The reason is that MOVS is emulated by the #VC handler by
  359. * splitting the move up into a read and a write and taking a nested #VC
  360. * exception on whatever of them is the MMIO access. Using string
  361. * instructions here would cause infinite nesting.
  362. */
  363. switch (size) {
  364. case 1: {
  365. u8 d1;
  366. u8 __user *s = (u8 __user *)src;
  367. if (__get_user(d1, s))
  368. goto fault;
  369. memcpy(buf, &d1, 1);
  370. break;
  371. }
  372. case 2: {
  373. u16 d2;
  374. u16 __user *s = (u16 __user *)src;
  375. if (__get_user(d2, s))
  376. goto fault;
  377. memcpy(buf, &d2, 2);
  378. break;
  379. }
  380. case 4: {
  381. u32 d4;
  382. u32 __user *s = (u32 __user *)src;
  383. if (__get_user(d4, s))
  384. goto fault;
  385. memcpy(buf, &d4, 4);
  386. break;
  387. }
  388. case 8: {
  389. u64 d8;
  390. u64 __user *s = (u64 __user *)src;
  391. if (__get_user(d8, s))
  392. goto fault;
  393. memcpy(buf, &d8, 8);
  394. break;
  395. }
  396. default:
  397. WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
  398. return ES_UNSUPPORTED;
  399. }
  400. return ES_OK;
  401. fault:
  402. if (user_mode(ctxt->regs))
  403. error_code |= X86_PF_USER;
  404. ctxt->fi.vector = X86_TRAP_PF;
  405. ctxt->fi.error_code = error_code;
  406. ctxt->fi.cr2 = (unsigned long)src;
  407. return ES_EXCEPTION;
  408. }
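/*
 * Translate a guest virtual address to a physical address by walking the
 * current page table. Used for MMIO emulation; emulated MMIO to or from
 * encrypted memory is not supported.
 */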
  409. static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
  410. unsigned long vaddr, phys_addr_t *paddr)
  411. {
  412. unsigned long va = (unsigned long)vaddr;
  413. unsigned int level;
  414. phys_addr_t pa;
  415. pgd_t *pgd;
  416. pte_t *pte;
  417. pgd = __va(read_cr3_pa());
  418. pgd = &pgd[pgd_index(va)];
  419. pte = lookup_address_in_pgd(pgd, va, &level);
  420. if (!pte) {
  421. ctxt->fi.vector = X86_TRAP_PF;
  422. ctxt->fi.cr2 = vaddr;
  423. ctxt->fi.error_code = 0;
  424. if (user_mode(ctxt->regs))
  425. ctxt->fi.error_code |= X86_PF_USER;
  426. return ES_EXCEPTION;
  427. }
  428. if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
  429. /* Emulated MMIO to/from encrypted memory not supported */
  430. return ES_UNSUPPORTED;
  431. pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
  432. pa |= va & ~page_level_mask(level);
  433. *paddr = pa;
  434. return ES_OK;
  435. }
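/*
 * For user-mode IN/OUT instructions, enforce the task's I/O permission
 * bitmap: a set bit means the port is not accessible and a #GP is injected.
 */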
  436. static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
  437. {
  438. BUG_ON(size > 4);
  439. if (user_mode(ctxt->regs)) {
  440. struct thread_struct *t = &current->thread;
  441. struct io_bitmap *iobm = t->io_bitmap;
  442. size_t idx;
  443. if (!iobm)
  444. goto fault;
  445. for (idx = port; idx < port + size; ++idx) {
  446. if (test_bit(idx, iobm->bitmap))
  447. goto fault;
  448. }
  449. }
  450. return ES_OK;
  451. fault:
  452. ctxt->fi.vector = X86_TRAP_GP;
  453. ctxt->fi.error_code = 0;
  454. return ES_EXCEPTION;
  455. }
  456. /* Include code shared with pre-decompression boot stage */
  457. #include "sev-shared.c"
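/*
 * Counterpart to __sev_get_ghcb(): either restore the GHCB contents from the
 * backup GHCB or invalidate and release the per-CPU GHCB.
 */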
  458. static noinstr void __sev_put_ghcb(struct ghcb_state *state)
  459. {
  460. struct sev_es_runtime_data *data;
  461. struct ghcb *ghcb;
  462. WARN_ON(!irqs_disabled());
  463. data = this_cpu_read(runtime_data);
  464. ghcb = &data->ghcb_page;
  465. if (state->ghcb) {
  466. /* Restore GHCB from Backup */
  467. *ghcb = *state->ghcb;
  468. data->backup_ghcb_active = false;
  469. state->ghcb = NULL;
  470. } else {
  471. /*
  472. * Invalidate the GHCB so a VMGEXIT instruction issued
  473. * from userspace won't appear to be valid.
  474. */
  475. vc_ghcb_invalidate(ghcb);
  476. data->ghcb_active = false;
  477. }
  478. }
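/*
 * Tell the hypervisor that the guest has finished handling the current NMI
 * by issuing the NMI_COMPLETE VMGEXIT.
 */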
  479. void noinstr __sev_es_nmi_complete(void)
  480. {
  481. struct ghcb_state state;
  482. struct ghcb *ghcb;
  483. ghcb = __sev_get_ghcb(&state);
  484. vc_ghcb_invalidate(ghcb);
  485. ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
  486. ghcb_set_sw_exit_info_1(ghcb, 0);
  487. ghcb_set_sw_exit_info_2(ghcb, 0);
  488. sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
  489. VMGEXIT();
  490. __sev_put_ghcb(&state);
  491. }
  492. static u64 __init get_secrets_page(void)
  493. {
  494. u64 pa_data = boot_params.cc_blob_address;
  495. struct cc_blob_sev_info info;
  496. void *map;
  497. /*
  498. * The CC blob contains the address of the secrets page, check if the
  499. * blob is present.
  500. */
  501. if (!pa_data)
  502. return 0;
  503. map = early_memremap(pa_data, sizeof(info));
  504. if (!map) {
  505. pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
  506. return 0;
  507. }
  508. memcpy(&info, map, sizeof(info));
  509. early_memunmap(map, sizeof(info));
  510. /* smoke-test the secrets page passed */
  511. if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
  512. return 0;
  513. return info.secrets_phys;
  514. }
  515. static u64 __init get_snp_jump_table_addr(void)
  516. {
  517. struct snp_secrets_page_layout *layout;
  518. void __iomem *mem;
  519. u64 pa, addr;
  520. pa = get_secrets_page();
  521. if (!pa)
  522. return 0;
  523. mem = ioremap_encrypted(pa, PAGE_SIZE);
  524. if (!mem) {
  525. pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
  526. return 0;
  527. }
  528. layout = (__force struct snp_secrets_page_layout *)mem;
  529. addr = layout->os_area.ap_jump_table_pa;
  530. iounmap(mem);
  531. return addr;
  532. }
  533. static u64 __init get_jump_table_addr(void)
  534. {
  535. struct ghcb_state state;
  536. unsigned long flags;
  537. struct ghcb *ghcb;
  538. u64 ret = 0;
  539. if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
  540. return get_snp_jump_table_addr();
  541. local_irq_save(flags);
  542. ghcb = __sev_get_ghcb(&state);
  543. vc_ghcb_invalidate(ghcb);
  544. ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
  545. ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
  546. ghcb_set_sw_exit_info_2(ghcb, 0);
  547. sev_es_wr_ghcb_msr(__pa(ghcb));
  548. VMGEXIT();
  549. if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
  550. ghcb_sw_exit_info_2_is_valid(ghcb))
  551. ret = ghcb->save.sw_exit_info_2;
  552. __sev_put_ghcb(&state);
  553. local_irq_restore(flags);
  554. return ret;
  555. }
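/*
 * Validate or invalidate a range of 4K pages with the PVALIDATE instruction.
 * A failure here is fatal and terminates the guest.
 */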
  556. static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
  557. {
  558. unsigned long vaddr_end;
  559. int rc;
  560. vaddr = vaddr & PAGE_MASK;
  561. vaddr_end = vaddr + (npages << PAGE_SHIFT);
  562. while (vaddr < vaddr_end) {
  563. rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
  564. if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
  565. sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
  566. vaddr = vaddr + PAGE_SIZE;
  567. }
  568. }
  569. static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
  570. {
  571. unsigned long paddr_end;
  572. u64 val;
  573. paddr = paddr & PAGE_MASK;
  574. paddr_end = paddr + (npages << PAGE_SHIFT);
  575. while (paddr < paddr_end) {
  576. /*
  577. * Use the MSR protocol because this function can be called before
  578. * the GHCB is established.
  579. */
  580. sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
  581. VMGEXIT();
  582. val = sev_es_rd_ghcb_msr();
  583. if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
  584. "Wrong PSC response code: 0x%x\n",
  585. (unsigned int)GHCB_RESP_CODE(val)))
  586. goto e_term;
  587. if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
  588. "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
  589. op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
  590. paddr, GHCB_MSR_PSC_RESP_VAL(val)))
  591. goto e_term;
  592. paddr = paddr + PAGE_SIZE;
  593. }
  594. return;
  595. e_term:
  596. sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
  597. }
  598. void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
  599. unsigned long npages)
  600. {
  601. /*
  602. * This can be invoked in early boot while running identity mapped, so
  603. * use an open coded check for SNP instead of using cc_platform_has().
  604. * This eliminates worries about jump tables or checking boot_cpu_data
  605. * in the cc_platform_has() function.
  606. */
  607. if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
  608. return;
  609. /*
  610. * Ask the hypervisor to mark the memory pages as private in the RMP
  611. * table.
  612. */
  613. early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
  614. /* Validate the memory pages after they've been added in the RMP table. */
  615. pvalidate_pages(vaddr, npages, true);
  616. }
  617. void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
  618. unsigned long npages)
  619. {
  620. /*
  621. * This can be invoked in early boot while running identity mapped, so
  622. * use an open coded check for SNP instead of using cc_platform_has().
  623. * This eliminates worries about jump tables or checking boot_cpu_data
  624. * in the cc_platform_has() function.
  625. */
  626. if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
  627. return;
  628. /* Invalidate the memory pages before they are marked shared in the RMP table. */
  629. pvalidate_pages(vaddr, npages, false);
  630. /* Ask hypervisor to mark the memory pages shared in the RMP table. */
  631. early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
  632. }
  633. void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
  634. {
  635. unsigned long vaddr, npages;
  636. vaddr = (unsigned long)__va(paddr);
  637. npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
  638. if (op == SNP_PAGE_STATE_PRIVATE)
  639. early_snp_set_memory_private(vaddr, paddr, npages);
  640. else if (op == SNP_PAGE_STATE_SHARED)
  641. early_snp_set_memory_shared(vaddr, paddr, npages);
  642. else
  643. WARN(1, "invalid memory op %d\n", op);
  644. }
  645. static int vmgexit_psc(struct snp_psc_desc *desc)
  646. {
  647. int cur_entry, end_entry, ret = 0;
  648. struct snp_psc_desc *data;
  649. struct ghcb_state state;
  650. struct es_em_ctxt ctxt;
  651. unsigned long flags;
  652. struct ghcb *ghcb;
  653. /*
  654. * __sev_get_ghcb() needs to run with IRQs disabled because it is using
  655. * a per-CPU GHCB.
  656. */
  657. local_irq_save(flags);
  658. ghcb = __sev_get_ghcb(&state);
  659. if (!ghcb) {
  660. ret = 1;
  661. goto out_unlock;
  662. }
  663. /* Copy the input desc into GHCB shared buffer */
  664. data = (struct snp_psc_desc *)ghcb->shared_buffer;
  665. memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
  666. /*
  667. * As per the GHCB specification, the hypervisor can resume the guest
  668. * before processing all the entries. Check whether all the entries
  669. * are processed. If not, then keep retrying. Note, the hypervisor
  670. * will update the data memory directly to indicate the status, so
  671. * reference the data->hdr everywhere.
  672. *
  673. * The strategy here is to wait for the hypervisor to change the page
  674. * state in the RMP table before the guest accesses the memory pages. If the
  675. * page state change was not successful, then later memory access will
  676. * result in a crash.
  677. */
  678. cur_entry = data->hdr.cur_entry;
  679. end_entry = data->hdr.end_entry;
  680. while (data->hdr.cur_entry <= data->hdr.end_entry) {
  681. ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
  682. /* The hypervisor advances data->hdr.cur_entry as it processes the entries. */
  683. ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
  684. /*
  685. * Page State Change VMGEXIT can pass error code through
  686. * exit_info_2.
  687. */
  688. if (WARN(ret || ghcb->save.sw_exit_info_2,
  689. "SNP: PSC failed ret=%d exit_info_2=%llx\n",
  690. ret, ghcb->save.sw_exit_info_2)) {
  691. ret = 1;
  692. goto out;
  693. }
  694. /* Verify that reserved bit is not set */
  695. if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
  696. ret = 1;
  697. goto out;
  698. }
  699. /*
  700. * Sanity check that entry processing is not going backwards.
  701. * This will happen only if the hypervisor is tricking us.
  702. */
  703. if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
  704. "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
  705. end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
  706. ret = 1;
  707. goto out;
  708. }
  709. }
  710. out:
  711. __sev_put_ghcb(&state);
  712. out_unlock:
  713. local_irq_restore(flags);
  714. return ret;
  715. }
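/*
 * Fill a Page State Change descriptor with one 4K entry per page in the
 * range [vaddr, vaddr_end) and hand it to the hypervisor via vmgexit_psc().
 */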
  716. static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
  717. unsigned long vaddr_end, int op)
  718. {
  719. struct psc_hdr *hdr;
  720. struct psc_entry *e;
  721. unsigned long pfn;
  722. int i;
  723. hdr = &data->hdr;
  724. e = data->entries;
  725. memset(data, 0, sizeof(*data));
  726. i = 0;
  727. while (vaddr < vaddr_end) {
  728. if (is_vmalloc_addr((void *)vaddr))
  729. pfn = vmalloc_to_pfn((void *)vaddr);
  730. else
  731. pfn = __pa(vaddr) >> PAGE_SHIFT;
  732. e->gfn = pfn;
  733. e->operation = op;
  734. hdr->end_entry = i;
  735. /*
  736. * Current SNP implementation doesn't keep track of the RMP page
  737. * size so use 4K for simplicity.
  738. */
  739. e->pagesize = RMP_PG_SIZE_4K;
  740. vaddr = vaddr + PAGE_SIZE;
  741. e++;
  742. i++;
  743. }
  744. if (vmgexit_psc(data))
  745. sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
  746. }
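/*
 * Change the page state of an arbitrarily sized range by splitting it into
 * chunks that fit into a single PSC descriptor (VMGEXIT_PSC_MAX_ENTRY pages).
 */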
  747. static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
  748. {
  749. unsigned long vaddr_end, next_vaddr;
  750. struct snp_psc_desc *desc;
  751. desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
  752. if (!desc)
  753. panic("SNP: failed to allocate memory for PSC descriptor\n");
  754. vaddr = vaddr & PAGE_MASK;
  755. vaddr_end = vaddr + (npages << PAGE_SHIFT);
  756. while (vaddr < vaddr_end) {
  757. /* Calculate the last vaddr that fits in one struct snp_psc_desc. */
  758. next_vaddr = min_t(unsigned long, vaddr_end,
  759. (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
  760. __set_pages_state(desc, vaddr, next_vaddr, op);
  761. vaddr = next_vaddr;
  762. }
  763. kfree(desc);
  764. }
  765. void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
  766. {
  767. if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
  768. return;
  769. pvalidate_pages(vaddr, npages, false);
  770. set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
  771. }
  772. void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
  773. {
  774. if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
  775. return;
  776. set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
  777. pvalidate_pages(vaddr, npages, true);
  778. }
  779. static int snp_set_vmsa(void *va, bool vmsa)
  780. {
  781. u64 attrs;
  782. /*
  783. * Running at VMPL0 allows the kernel to change the VMSA bit for a page
  784. * using the RMPADJUST instruction. However, for the instruction to
  785. * succeed it must target the permissions of a lesser privileged
  786. * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
  787. * instruction in the AMD64 APM Volume 3).
  788. */
  789. attrs = 1;
  790. if (vmsa)
  791. attrs |= RMPADJUST_VMSA_PAGE_BIT;
  792. return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
  793. }
  794. #define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
  795. #define INIT_CS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
  796. #define INIT_DS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
  797. #define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2)
  798. #define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3)
  799. static void *snp_alloc_vmsa_page(void)
  800. {
  801. struct page *p;
  802. /*
  803. * Allocate VMSA page to work around the SNP erratum where the CPU will
  804. * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
  805. * collides with the RMP entry of the VMSA page. The recommended workaround
  806. * is to not use a large page.
  807. *
  808. * Allocate an 8k page which is also 8k-aligned.
  809. */
  810. p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
  811. if (!p)
  812. return NULL;
  813. split_page(p, 1);
  814. /* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
  815. __free_page(p);
  816. return page_address(p + 1);
  817. }
  818. static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
  819. {
  820. int err;
  821. err = snp_set_vmsa(vmsa, false);
  822. if (err)
  823. pr_err("clear VMSA page failed (%u), leaking page\n", err);
  824. else
  825. free_page((unsigned long)vmsa);
  826. }
  827. static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
  828. {
  829. struct sev_es_save_area *cur_vmsa, *vmsa;
  830. struct ghcb_state state;
  831. unsigned long flags;
  832. struct ghcb *ghcb;
  833. u8 sipi_vector;
  834. int cpu, ret;
  835. u64 cr4;
  836. /*
  837. * The hypervisor SNP feature support check has happened earlier, just check
  838. * the AP_CREATION one here.
  839. */
  840. if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
  841. return -EOPNOTSUPP;
  842. /*
  843. * Verify the desired start IP against the known trampoline start IP
  844. * to catch any future new trampolines that may be introduced that
  845. * would require a new protected guest entry point.
  846. */
  847. if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
  848. "Unsupported SNP start_ip: %lx\n", start_ip))
  849. return -EINVAL;
  850. /* Override start_ip with known protected guest start IP */
  851. start_ip = real_mode_header->sev_es_trampoline_start;
  852. /* Find the logical CPU for the APIC ID */
  853. for_each_present_cpu(cpu) {
  854. if (arch_match_cpu_phys_id(cpu, apic_id))
  855. break;
  856. }
  857. if (cpu >= nr_cpu_ids)
  858. return -EINVAL;
  859. cur_vmsa = per_cpu(sev_vmsa, cpu);
  860. /*
  861. * A new VMSA is created each time because there is no guarantee that
  862. * the current VMSA is the kernel's or that the vCPU is not running. If
  863. * an attempt was done to use the current VMSA with a running vCPU, a
  864. * #VMEXIT of that vCPU would wipe out all of the settings being done
  865. * here.
  866. */
  867. vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
  868. if (!vmsa)
  869. return -ENOMEM;
  870. /* CR4 should maintain the MCE value */
  871. cr4 = native_read_cr4() & X86_CR4_MCE;
  872. /* Set the CS value based on the start_ip converted to a SIPI vector */
  873. sipi_vector = (start_ip >> 12);
  874. vmsa->cs.base = sipi_vector << 12;
  875. vmsa->cs.limit = AP_INIT_CS_LIMIT;
  876. vmsa->cs.attrib = INIT_CS_ATTRIBS;
  877. vmsa->cs.selector = sipi_vector << 8;
  878. /* Set the RIP value based on start_ip */
  879. vmsa->rip = start_ip & 0xfff;
  880. /* Set AP INIT defaults as documented in the APM */
  881. vmsa->ds.limit = AP_INIT_DS_LIMIT;
  882. vmsa->ds.attrib = INIT_DS_ATTRIBS;
  883. vmsa->es = vmsa->ds;
  884. vmsa->fs = vmsa->ds;
  885. vmsa->gs = vmsa->ds;
  886. vmsa->ss = vmsa->ds;
  887. vmsa->gdtr.limit = AP_INIT_GDTR_LIMIT;
  888. vmsa->ldtr.limit = AP_INIT_LDTR_LIMIT;
  889. vmsa->ldtr.attrib = INIT_LDTR_ATTRIBS;
  890. vmsa->idtr.limit = AP_INIT_IDTR_LIMIT;
  891. vmsa->tr.limit = AP_INIT_TR_LIMIT;
  892. vmsa->tr.attrib = INIT_TR_ATTRIBS;
  893. vmsa->cr4 = cr4;
  894. vmsa->cr0 = AP_INIT_CR0_DEFAULT;
  895. vmsa->dr7 = DR7_RESET_VALUE;
  896. vmsa->dr6 = AP_INIT_DR6_DEFAULT;
  897. vmsa->rflags = AP_INIT_RFLAGS_DEFAULT;
  898. vmsa->g_pat = AP_INIT_GPAT_DEFAULT;
  899. vmsa->xcr0 = AP_INIT_XCR0_DEFAULT;
  900. vmsa->mxcsr = AP_INIT_MXCSR_DEFAULT;
  901. vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT;
  902. vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT;
  903. /* SVME must be set. */
  904. vmsa->efer = EFER_SVME;
  905. /*
  906. * Set the SNP-specific fields for this VMSA:
  907. * VMPL level
  908. * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
  909. */
  910. vmsa->vmpl = 0;
  911. vmsa->sev_features = sev_status >> 2;
  912. /* Switch the page over to a VMSA page now that it is initialized */
  913. ret = snp_set_vmsa(vmsa, true);
  914. if (ret) {
  915. pr_err("set VMSA page failed (%u)\n", ret);
  916. free_page((unsigned long)vmsa);
  917. return -EINVAL;
  918. }
  919. /* Issue VMGEXIT AP Creation NAE event */
  920. local_irq_save(flags);
  921. ghcb = __sev_get_ghcb(&state);
  922. vc_ghcb_invalidate(ghcb);
  923. ghcb_set_rax(ghcb, vmsa->sev_features);
  924. ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
  925. ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
  926. ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
  927. sev_es_wr_ghcb_msr(__pa(ghcb));
  928. VMGEXIT();
  929. if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
  930. lower_32_bits(ghcb->save.sw_exit_info_1)) {
  931. pr_err("SNP AP Creation error\n");
  932. ret = -EINVAL;
  933. }
  934. __sev_put_ghcb(&state);
  935. local_irq_restore(flags);
  936. /* Perform cleanup if there was an error */
  937. if (ret) {
  938. snp_cleanup_vmsa(vmsa);
  939. vmsa = NULL;
  940. }
  941. /* Free up any previous VMSA page */
  942. if (cur_vmsa)
  943. snp_cleanup_vmsa(cur_vmsa);
  944. /* Record the current VMSA page */
  945. per_cpu(sev_vmsa, cpu) = vmsa;
  946. return ret;
  947. }
  948. void snp_set_wakeup_secondary_cpu(void)
  949. {
  950. if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
  951. return;
  952. /*
  953. * Always set this override if SNP is enabled. This makes it the
  954. * required method to start APs under SNP. If the hypervisor does
  955. * not support AP creation, then no APs will be started.
  956. */
  957. apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
  958. }
  959. int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
  960. {
  961. u16 startup_cs, startup_ip;
  962. phys_addr_t jump_table_pa;
  963. u64 jump_table_addr;
  964. u16 __iomem *jump_table;
  965. jump_table_addr = get_jump_table_addr();
  966. /* On UP guests there is no jump table so this is not a failure */
  967. if (!jump_table_addr)
  968. return 0;
  969. /* Check if AP Jump Table is page-aligned */
  970. if (jump_table_addr & ~PAGE_MASK)
  971. return -EINVAL;
  972. jump_table_pa = jump_table_addr & PAGE_MASK;
  973. startup_cs = (u16)(rmh->trampoline_start >> 4);
  974. startup_ip = (u16)(rmh->sev_es_trampoline_start -
  975. rmh->trampoline_start);
  976. jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
  977. if (!jump_table)
  978. return -EIO;
  979. writew(startup_ip, &jump_table[0]);
  980. writew(startup_cs, &jump_table[1]);
  981. iounmap(jump_table);
  982. return 0;
  983. }
  984. /*
  985. * This is needed by the OVMF UEFI firmware which will use whatever it finds in
  986. * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
  987. * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
  988. */
  989. int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
  990. {
  991. struct sev_es_runtime_data *data;
  992. unsigned long address, pflags;
  993. int cpu;
  994. u64 pfn;
  995. if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
  996. return 0;
  997. pflags = _PAGE_NX | _PAGE_RW;
  998. for_each_possible_cpu(cpu) {
  999. data = per_cpu(runtime_data, cpu);
  1000. address = __pa(&data->ghcb_page);
  1001. pfn = address >> PAGE_SHIFT;
  1002. if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
  1003. return 1;
  1004. }
  1005. return 0;
  1006. }
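/*
 * Handle RDMSR/WRMSR intercepts. The second opcode byte (0x30) identifies
 * WRMSR; for RDMSR the result is copied back from the GHCB into RAX/RDX.
 */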
  1007. static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
  1008. {
  1009. struct pt_regs *regs = ctxt->regs;
  1010. enum es_result ret;
  1011. u64 exit_info_1;
  1012. /* Is it a WRMSR? */
  1013. exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
  1014. ghcb_set_rcx(ghcb, regs->cx);
  1015. if (exit_info_1) {
  1016. ghcb_set_rax(ghcb, regs->ax);
  1017. ghcb_set_rdx(ghcb, regs->dx);
  1018. }
  1019. ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
  1020. if ((ret == ES_OK) && (!exit_info_1)) {
  1021. regs->ax = ghcb->save.rax;
  1022. regs->dx = ghcb->save.rdx;
  1023. }
  1024. return ret;
  1025. }
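/* Register this CPU's runtime GHCB GPA with the hypervisor, as required for SNP guests. */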
  1026. static void snp_register_per_cpu_ghcb(void)
  1027. {
  1028. struct sev_es_runtime_data *data;
  1029. struct ghcb *ghcb;
  1030. data = this_cpu_read(runtime_data);
  1031. ghcb = &data->ghcb_page;
  1032. snp_register_ghcb_early(__pa(ghcb));
  1033. }
  1034. void setup_ghcb(void)
  1035. {
  1036. if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
  1037. return;
  1038. /*
  1039. * Check whether the runtime #VC exception handler is active. It uses
  1040. * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
  1041. *
  1042. * If SNP is active, register the per-CPU GHCB page so that the runtime
  1043. * exception handler can use it.
  1044. */
  1045. if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
  1046. if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
  1047. snp_register_per_cpu_ghcb();
  1048. return;
  1049. }
  1050. /*
  1051. * Make sure the hypervisor talks a supported protocol.
  1052. * This gets called only in the BSP boot phase.
  1053. */
  1054. if (!sev_es_negotiate_protocol())
  1055. sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
  1056. /*
  1057. * Clear the boot_ghcb. The first exception comes in before the bss
  1058. * section is cleared.
  1059. */
  1060. memset(&boot_ghcb_page, 0, PAGE_SIZE);
  1061. /* Alright - Make the boot-ghcb public */
  1062. boot_ghcb = &boot_ghcb_page;
  1063. /* SNP guests require the GHCB GPA to be registered. */
  1064. if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
  1065. snp_register_ghcb_early(__pa(&boot_ghcb_page));
  1066. }
  1067. #ifdef CONFIG_HOTPLUG_CPU
  1068. static void sev_es_ap_hlt_loop(void)
  1069. {
  1070. struct ghcb_state state;
  1071. struct ghcb *ghcb;
  1072. ghcb = __sev_get_ghcb(&state);
  1073. while (true) {
  1074. vc_ghcb_invalidate(ghcb);
  1075. ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
  1076. ghcb_set_sw_exit_info_1(ghcb, 0);
  1077. ghcb_set_sw_exit_info_2(ghcb, 0);
  1078. sev_es_wr_ghcb_msr(__pa(ghcb));
  1079. VMGEXIT();
  1080. /* Wakeup signal? */
  1081. if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
  1082. ghcb->save.sw_exit_info_2)
  1083. break;
  1084. }
  1085. __sev_put_ghcb(&state);
  1086. }
  1087. /*
  1088. * Play_dead handler when running under SEV-ES. This is needed because
  1089. * the hypervisor can't deliver an SIPI request to restart the AP.
  1090. * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
  1091. * hypervisor wakes it up again.
  1092. */
  1093. static void sev_es_play_dead(void)
  1094. {
  1095. play_dead_common();
  1096. /* IRQs now disabled */
  1097. sev_es_ap_hlt_loop();
  1098. /*
  1099. * If we get here, the VCPU was woken up again. Jump to CPU
  1100. * startup code to get it back online.
  1101. */
  1102. start_cpu0();
  1103. }
  1104. #else /* CONFIG_HOTPLUG_CPU */
  1105. #define sev_es_play_dead native_play_dead
  1106. #endif /* CONFIG_HOTPLUG_CPU */
  1107. #ifdef CONFIG_SMP
  1108. static void __init sev_es_setup_play_dead(void)
  1109. {
  1110. smp_ops.play_dead = sev_es_play_dead;
  1111. }
  1112. #else
  1113. static inline void sev_es_setup_play_dead(void) { }
  1114. #endif
  1115. static void __init alloc_runtime_data(int cpu)
  1116. {
  1117. struct sev_es_runtime_data *data;
  1118. data = memblock_alloc(sizeof(*data), PAGE_SIZE);
  1119. if (!data)
  1120. panic("Can't allocate SEV-ES runtime data");
  1121. per_cpu(runtime_data, cpu) = data;
  1122. }
  1123. static void __init init_ghcb(int cpu)
  1124. {
  1125. struct sev_es_runtime_data *data;
  1126. int err;
  1127. data = per_cpu(runtime_data, cpu);
  1128. err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
  1129. sizeof(data->ghcb_page));
  1130. if (err)
  1131. panic("Can't map GHCBs unencrypted");
  1132. memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
  1133. data->ghcb_active = false;
  1134. data->backup_ghcb_active = false;
  1135. }
  1136. void __init sev_es_init_vc_handling(void)
  1137. {
  1138. int cpu;
  1139. BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
  1140. if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
  1141. return;
  1142. if (!sev_es_check_cpu_features())
  1143. panic("SEV-ES CPU Features missing");
  1144. /*
  1145. * SNP is supported in v2 of the GHCB spec which mandates support for HV
  1146. * features.
  1147. */
  1148. if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
  1149. sev_hv_features = get_hv_features();
  1150. if (!(sev_hv_features & GHCB_HV_FT_SNP))
  1151. sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
  1152. }
  1153. /* Enable SEV-ES special handling */
  1154. static_branch_enable(&sev_es_enable_key);
  1155. /* Initialize per-cpu GHCB pages */
  1156. for_each_possible_cpu(cpu) {
  1157. alloc_runtime_data(cpu);
  1158. init_ghcb(cpu);
  1159. }
  1160. sev_es_setup_play_dead();
  1161. /* Secondary CPUs use the runtime #VC handler */
  1162. initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
  1163. }
  1164. static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
  1165. {
  1166. int trapnr = ctxt->fi.vector;
  1167. if (trapnr == X86_TRAP_PF)
  1168. native_write_cr2(ctxt->fi.cr2);
  1169. ctxt->regs->orig_ax = ctxt->fi.error_code;
  1170. do_early_exception(ctxt->regs, trapnr);
  1171. }
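/*
 * Return a pointer to the pt_regs slot that corresponds to the ModRM r/m
 * operand of the decoded instruction, or NULL if it cannot be resolved.
 */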
  1172. static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
  1173. {
  1174. long *reg_array;
  1175. int offset;
  1176. reg_array = (long *)ctxt->regs;
  1177. offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
  1178. if (offset < 0)
  1179. return NULL;
  1180. offset /= sizeof(long);
  1181. return reg_array + offset;
  1182. }
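/*
 * Perform a single emulated MMIO read or write through the hypervisor. The
 * memory operand is translated to a physical address and the GHCB shared
 * buffer is used as the data scratch area.
 */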
  1183. static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
  1184. unsigned int bytes, bool read)
  1185. {
  1186. u64 exit_code, exit_info_1, exit_info_2;
  1187. unsigned long ghcb_pa = __pa(ghcb);
  1188. enum es_result res;
  1189. phys_addr_t paddr;
  1190. void __user *ref;
  1191. ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
  1192. if (ref == (void __user *)-1L)
  1193. return ES_UNSUPPORTED;
  1194. exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
  1195. res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
  1196. if (res != ES_OK) {
  1197. if (res == ES_EXCEPTION && !read)
  1198. ctxt->fi.error_code |= X86_PF_WRITE;
  1199. return res;
  1200. }
  1201. exit_info_1 = paddr;
  1202. /* Can never be greater than 8 */
  1203. exit_info_2 = bytes;
  1204. ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
  1205. return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
  1206. }
  1207. /*
  1208. * The MOVS instruction has two memory operands, which raises the
  1209. * problem that it is not known whether the access to the source or the
  1210. * destination caused the #VC exception (and hence whether an MMIO read
  1211. * or write operation needs to be emulated).
  1212. *
  1213. * Instead of playing games with walking page-tables and trying to guess
  1214. * whether the source or destination is an MMIO range, split the move
  1215. * into two operations, a read and a write with only one memory operand.
  1216. * This will cause a nested #VC exception on the MMIO address which can
  1217. * then be handled.
  1218. *
  1219. * This implementation has the benefit that it also supports MOVS where
  1220. * source _and_ destination are MMIO regions.
  1221. *
  1222. * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
  1223. * rare operation. If it turns out to be a performance problem the split
  1224. * operations can be moved to memcpy_fromio() and memcpy_toio().
  1225. */
  1226. static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
  1227. unsigned int bytes)
  1228. {
  1229. unsigned long ds_base, es_base;
  1230. unsigned char *src, *dst;
  1231. unsigned char buffer[8];
  1232. enum es_result ret;
  1233. bool rep;
  1234. int off;
  1235. ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
  1236. es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
  1237. if (ds_base == -1L || es_base == -1L) {
  1238. ctxt->fi.vector = X86_TRAP_GP;
  1239. ctxt->fi.error_code = 0;
  1240. return ES_EXCEPTION;
  1241. }
  1242. src = ds_base + (unsigned char *)ctxt->regs->si;
  1243. dst = es_base + (unsigned char *)ctxt->regs->di;
  1244. ret = vc_read_mem(ctxt, src, buffer, bytes);
  1245. if (ret != ES_OK)
  1246. return ret;
  1247. ret = vc_write_mem(ctxt, dst, buffer, bytes);
  1248. if (ret != ES_OK)
  1249. return ret;
  1250. if (ctxt->regs->flags & X86_EFLAGS_DF)
  1251. off = -bytes;
  1252. else
  1253. off = bytes;
  1254. ctxt->regs->si += off;
  1255. ctxt->regs->di += off;
  1256. rep = insn_has_rep_prefix(&ctxt->insn);
  1257. if (rep)
  1258. ctxt->regs->cx -= 1;
  1259. if (!rep || ctxt->regs->cx == 0)
  1260. return ES_OK;
  1261. else
  1262. return ES_RETRY;
  1263. }
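/*
 * Decode and emulate the MMIO access that caused the #VC exception: plain
 * and immediate writes, reads with zero- or sign-extension, and MOVS.
 * MMIO emulation for user-mode accesses is not supported.
 */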
static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
        struct insn *insn = &ctxt->insn;
        unsigned int bytes = 0;
        enum mmio_type mmio;
        enum es_result ret;
        u8 sign_byte;
        long *reg_data;

        mmio = insn_decode_mmio(insn, &bytes);
        if (mmio == MMIO_DECODE_FAILED)
                return ES_DECODE_FAILED;

        if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
                reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
                if (!reg_data)
                        return ES_DECODE_FAILED;
        }

        if (user_mode(ctxt->regs))
                return ES_UNSUPPORTED;

        switch (mmio) {
        case MMIO_WRITE:
                memcpy(ghcb->shared_buffer, reg_data, bytes);
                ret = vc_do_mmio(ghcb, ctxt, bytes, false);
                break;
        case MMIO_WRITE_IMM:
                memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
                ret = vc_do_mmio(ghcb, ctxt, bytes, false);
                break;
        case MMIO_READ:
                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;

                /* Zero-extend for 32-bit operation */
                if (bytes == 4)
                        *reg_data = 0;

                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;
        case MMIO_READ_ZERO_EXTEND:
                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;

                /* Zero extend based on operand size */
                memset(reg_data, 0, insn->opnd_bytes);
                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;
        case MMIO_READ_SIGN_EXTEND:
                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;

                if (bytes == 1) {
                        u8 *val = (u8 *)ghcb->shared_buffer;

                        sign_byte = (*val & 0x80) ? 0xff : 0x00;
                } else {
                        u16 *val = (u16 *)ghcb->shared_buffer;

                        sign_byte = (*val & 0x8000) ? 0xff : 0x00;
                }

                /* Sign extend based on operand size */
                memset(reg_data, sign_byte, insn->opnd_bytes);
                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;
        case MMIO_MOVS:
                ret = vc_handle_mmio_movs(ctxt, bytes);
                break;
        default:
                ret = ES_UNSUPPORTED;
                break;
        }

        return ret;
}
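/*
 * Worked example for the MMIO_READ_SIGN_EXTEND case above (values are
 * illustrative): for a "movsx eax, byte ptr [mmio]" where the MMIO byte is
 * 0x80, bytes == 1, insn->opnd_bytes == 4 and sign_byte == 0xff, so the low
 * 32 bits of the destination register end up as 0xffffff80. With a byte of
 * 0x7f, sign_byte is 0x00 and the result is 0x0000007f.
 */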
static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
                                          struct es_em_ctxt *ctxt)
{
        struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        long val, *reg = vc_insn_get_rm(ctxt);
        enum es_result ret;

        if (!reg)
                return ES_DECODE_FAILED;

        val = *reg;

        /* Upper 32 bits must be written as zeroes */
        if (val >> 32) {
                ctxt->fi.vector = X86_TRAP_GP;
                ctxt->fi.error_code = 0;
                return ES_EXCEPTION;
        }

        /* Clear out other reserved bits and set bit 10 */
        val = (val & 0xffff23ffL) | BIT(10);

        /* Early non-zero writes to DR7 are not supported */
        if (!data && (val & ~DR7_RESET_VALUE))
                return ES_UNSUPPORTED;

        /* Using a value of 0 for ExitInfo1 means RAX holds the value */
        ghcb_set_rax(ghcb, val);
        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (data)
                data->dr7 = val;

        return ES_OK;
}
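/*
 * A short sketch of the masking above (values illustrative): a guest write
 * of 0 becomes (0 & 0xffff23ff) | BIT(10) == 0x400, i.e. the architectural
 * DR7 reset value, while a value with any of bits 63:32 set never reaches
 * the masking because the earlier check injects #GP. The cached data->dr7
 * is what vc_handle_dr7_read() below hands back to the guest.
 */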
static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
                                         struct es_em_ctxt *ctxt)
{
        struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
        long *reg = vc_insn_get_rm(ctxt);

        if (!reg)
                return ES_DECODE_FAILED;

        if (data)
                *reg = data->dr7;
        else
                *reg = DR7_RESET_VALUE;

        return ES_OK;
}

static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
                                       struct es_em_ctxt *ctxt)
{
        return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
}
static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
        enum es_result ret;

        ghcb_set_rcx(ghcb, ctxt->regs->cx);

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
                return ES_VMM_ERROR;

        ctxt->regs->ax = ghcb->save.rax;
        ctxt->regs->dx = ghcb->save.rdx;

        return ES_OK;
}

static enum es_result vc_handle_monitor(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
{
        /*
         * Treat it as a NOP and do not leak a physical address to the
         * hypervisor.
         */
        return ES_OK;
}

static enum es_result vc_handle_mwait(struct ghcb *ghcb,
                                      struct es_em_ctxt *ctxt)
{
        /* Treat the same as MONITOR/MONITORX */
        return ES_OK;
}
static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
{
        enum es_result ret;

        ghcb_set_rax(ghcb, ctxt->regs->ax);
        ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);

        if (x86_platform.hyper.sev_es_hcall_prepare)
                x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);

        ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
        if (ret != ES_OK)
                return ret;

        if (!ghcb_rax_is_valid(ghcb))
                return ES_VMM_ERROR;

        ctxt->regs->ax = ghcb->save.rax;

        /*
         * Call sev_es_hcall_finish() after regs->ax is already set.
         * This allows the hypervisor handler to overwrite it again if
         * necessary.
         */
        if (x86_platform.hyper.sev_es_hcall_finish &&
            !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
                return ES_VMM_ERROR;

        return ES_OK;
}
static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
{
        /*
         * Calling exc_alignment_check() directly does not work, because it
         * enables IRQs and the GHCB is active. Forward the exception and call
         * it later from vc_forward_exception().
         */
        ctxt->fi.vector = X86_TRAP_AC;
        ctxt->fi.error_code = 0;

        return ES_EXCEPTION;
}
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
                                         struct ghcb *ghcb,
                                         unsigned long exit_code)
{
        enum es_result result;

        switch (exit_code) {
        case SVM_EXIT_READ_DR7:
                result = vc_handle_dr7_read(ghcb, ctxt);
                break;
        case SVM_EXIT_WRITE_DR7:
                result = vc_handle_dr7_write(ghcb, ctxt);
                break;
        case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
                result = vc_handle_trap_ac(ghcb, ctxt);
                break;
        case SVM_EXIT_RDTSC:
        case SVM_EXIT_RDTSCP:
                result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
                break;
        case SVM_EXIT_RDPMC:
                result = vc_handle_rdpmc(ghcb, ctxt);
                break;
        case SVM_EXIT_INVD:
                pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
                result = ES_UNSUPPORTED;
                break;
        case SVM_EXIT_CPUID:
                result = vc_handle_cpuid(ghcb, ctxt);
                break;
        case SVM_EXIT_IOIO:
                result = vc_handle_ioio(ghcb, ctxt);
                break;
        case SVM_EXIT_MSR:
                result = vc_handle_msr(ghcb, ctxt);
                break;
        case SVM_EXIT_VMMCALL:
                result = vc_handle_vmmcall(ghcb, ctxt);
                break;
        case SVM_EXIT_WBINVD:
                result = vc_handle_wbinvd(ghcb, ctxt);
                break;
        case SVM_EXIT_MONITOR:
                result = vc_handle_monitor(ghcb, ctxt);
                break;
        case SVM_EXIT_MWAIT:
                result = vc_handle_mwait(ghcb, ctxt);
                break;
        case SVM_EXIT_NPF:
                result = vc_handle_mmio(ghcb, ctxt);
                break;
        default:
                /*
                 * Unexpected #VC exception
                 */
                result = ES_UNSUPPORTED;
        }

        return result;
}
static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
{
        long error_code = ctxt->fi.error_code;
        int trapnr = ctxt->fi.vector;

        ctxt->regs->orig_ax = ctxt->fi.error_code;

        switch (trapnr) {
        case X86_TRAP_GP:
                exc_general_protection(ctxt->regs, error_code);
                break;
        case X86_TRAP_UD:
                exc_invalid_op(ctxt->regs);
                break;
        case X86_TRAP_PF:
                write_cr2(ctxt->fi.cr2);
                exc_page_fault(ctxt->regs, error_code);
                break;
        case X86_TRAP_AC:
                exc_alignment_check(ctxt->regs, error_code);
                break;
        default:
                pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
                BUG();
        }
}
static __always_inline bool is_vc2_stack(unsigned long sp)
{
        return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}

static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
{
        unsigned long sp, prev_sp;

        sp      = (unsigned long)regs;
        prev_sp = regs->sp;

        /*
         * If the code was already executing on the VC2 stack when the #VC
         * happened, let it proceed to the normal handling routine. This way the
         * code executing on the VC2 stack can cause #VC exceptions to get handled.
         */
        return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
}
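/*
 * Reasoning sketch for the check above: 'sp' is the location of the pt_regs
 * frame, i.e. roughly the stack the #VC handler itself was entered on, while
 * 'prev_sp' is the stack pointer of the interrupted context. Only the
 * combination "handler entered on the VC2 IST stack, interrupted code not
 * already running on it" is treated as an invalid context; the kernel-mode
 * handler below then panics instead of attempting any emulation.
 */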
static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
        struct ghcb_state state;
        struct es_em_ctxt ctxt;
        enum es_result result;
        struct ghcb *ghcb;
        bool ret = true;

        ghcb = __sev_get_ghcb(&state);

        vc_ghcb_invalidate(ghcb);
        result = vc_init_em_ctxt(&ctxt, regs, error_code);

        if (result == ES_OK)
                result = vc_handle_exitcode(&ctxt, ghcb, error_code);

        __sev_put_ghcb(&state);

        /* Done - now check the result */
        switch (result) {
        case ES_OK:
                vc_finish_insn(&ctxt);
                break;
        case ES_UNSUPPORTED:
                pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
                                   error_code, regs->ip);
                ret = false;
                break;
        case ES_VMM_ERROR:
                pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
                                   error_code, regs->ip);
                ret = false;
                break;
        case ES_DECODE_FAILED:
                pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
                                   error_code, regs->ip);
                ret = false;
                break;
        case ES_EXCEPTION:
                vc_forward_exception(&ctxt);
                break;
        case ES_RETRY:
                /* Nothing to do */
                break;
        default:
                pr_emerg("Unknown result in %s():%d\n", __func__, result);
                /*
                 * Emulating the instruction which caused the #VC exception
                 * failed - can't continue so print debug information
                 */
                BUG();
        }

        return ret;
}
static __always_inline bool vc_is_db(unsigned long error_code)
{
        return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
}

/*
 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
 * and will panic when an error happens.
 */
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
{
        irqentry_state_t irq_state;

        /*
         * With the current implementation it is always possible to switch to a
         * safe stack because #VC exceptions only happen at known places, like
         * intercepted instructions or accesses to MMIO areas/IO ports. They can
         * also happen with code instrumentation when the hypervisor intercepts
         * #DB, but the critical paths are forbidden to be instrumented, so #DB
         * exceptions currently also only happen in safe places.
         *
         * But keep this here in case the noinstr annotations are violated due
         * to a bug elsewhere.
         */
        if (unlikely(vc_from_invalid_context(regs))) {
                instrumentation_begin();
                panic("Can't handle #VC exception from unsupported context\n");
                instrumentation_end();
        }

        /*
         * Handle #DB before calling into !noinstr code to avoid recursive #DB.
         */
        if (vc_is_db(error_code)) {
                exc_debug(regs);
                return;
        }

        irq_state = irqentry_nmi_enter(regs);

        instrumentation_begin();

        if (!vc_raw_handle_exception(regs, error_code)) {
                /* Show some debug info */
                show_regs(regs);

                /* Ask hypervisor to sev_es_terminate */
                sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

                /* If that fails and we get here - just panic */
                panic("Returned from Terminate-Request to Hypervisor\n");
        }

        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);
}
/*
 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
 * and will kill the current task with SIGBUS when an error happens.
 */
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
        /*
         * Handle #DB before calling into !noinstr code to avoid recursive #DB.
         */
        if (vc_is_db(error_code)) {
                noist_exc_debug(regs);
                return;
        }

        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();

        if (!vc_raw_handle_exception(regs, error_code)) {
                /*
                 * Do not kill the machine if user-space triggered the
                 * exception. Send SIGBUS instead and let user-space deal with
                 * it.
                 */
                force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
        }

        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
}
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
{
        unsigned long exit_code = regs->orig_ax;
        struct es_em_ctxt ctxt;
        enum es_result result;

        vc_ghcb_invalidate(boot_ghcb);

        result = vc_init_em_ctxt(&ctxt, regs, exit_code);
        if (result == ES_OK)
                result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);

        /* Done - now check the result */
        switch (result) {
        case ES_OK:
                vc_finish_insn(&ctxt);
                break;
        case ES_UNSUPPORTED:
                early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
                             exit_code, regs->ip);
                goto fail;
        case ES_VMM_ERROR:
                early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
                             exit_code, regs->ip);
                goto fail;
        case ES_DECODE_FAILED:
                early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
                             exit_code, regs->ip);
                goto fail;
        case ES_EXCEPTION:
                vc_early_forward_exception(&ctxt);
                break;
        case ES_RETRY:
                /* Nothing to do */
                break;
        default:
                BUG();
        }

        return true;

fail:
        show_regs(regs);

        sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the kernel
 * in the following ways, depending on how it is booted:
 *
 * - when booted via the boot/decompress kernel:
 *   - via boot_params
 *
 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
 *   - via a setup_data entry, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
        struct cc_blob_sev_info *cc_info;

        /* Boot kernel would have passed the CC blob via boot_params. */
        if (bp->cc_blob_address) {
                cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
                goto found_cc_info;
        }

        /*
         * If kernel was booted directly, without the use of the
         * boot/decompression kernel, the CC blob may have been passed via
         * setup_data instead.
         */
        cc_info = find_cc_blob_setup_data(bp);
        if (!cc_info)
                return NULL;

found_cc_info:
        if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
                snp_abort();

        return cc_info;
}
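/*
 * A minimal sketch, assuming the SETUP_CC_BLOB setup_data type, of how the
 * second path above can be exercised by a bootloader (the bootloader-side
 * steps are illustrative):
 *
 *   - place a struct cc_blob_sev_info with magic CC_BLOB_SEV_HDR_MAGIC in
 *     memory
 *   - append a setup_data node of type SETUP_CC_BLOB to bp->hdr.setup_data
 *     whose payload carries the physical address of that blob
 *
 * find_cc_blob_setup_data() then walks the setup_data list and returns the
 * blob, after which the magic check above still applies.
 */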
bool __init snp_init(struct boot_params *bp)
{
        struct cc_blob_sev_info *cc_info;

        if (!bp)
                return false;

        cc_info = find_cc_blob(bp);
        if (!cc_info)
                return false;

        setup_cpuid_table(cc_info);

        /*
         * The CC blob will be used later to access the secrets page. Cache
         * it here like the boot kernel does.
         */
        bp->cc_blob_address = (u32)(unsigned long)cc_info;

        return true;
}

void __init __noreturn snp_abort(void)
{
        sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
static void dump_cpuid_table(void)
{
        const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
        int i = 0;

        pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
                cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);

        for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
                const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

                pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
                        i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
                        fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
        }
}

/*
 * It is useful from an auditing/testing perspective to provide an easy way
 * for the guest owner to know that the CPUID table has been initialized as
 * expected, but that initialization happens too early in boot to print any
 * sort of indicator, and there's not really any other good place to do it,
 * so do it here.
 */
static int __init report_cpuid_table(void)
{
        const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

        if (!cpuid_table->count)
                return 0;

        pr_info("Using SNP CPUID table, %d entries present.\n",
                cpuid_table->count);

        if (sev_cfg.debug)
                dump_cpuid_table();

        return 0;
}
arch_initcall(report_cpuid_table);
static int __init init_sev_config(char *str)
{
        char *s;

        while ((s = strsep(&str, ","))) {
                if (!strcmp(s, "debug")) {
                        sev_cfg.debug = true;
                        continue;
                }

                pr_info("SEV command-line option '%s' was not recognized\n", s);
        }

        return 1;
}
__setup("sev=", init_sev_config);
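/*
 * Usage sketch: booting with "sev=debug" on the kernel command line sets
 * sev_cfg.debug, which makes report_cpuid_table() above dump the full SNP
 * CPUID table. Unrecognized options are only reported via pr_info() and
 * otherwise ignored.
 */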
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
        struct ghcb_state state;
        struct es_em_ctxt ctxt;
        unsigned long flags;
        struct ghcb *ghcb;
        int ret;

        rio->exitinfo2 = SEV_RET_NO_FW_CALL;

        /*
         * __sev_get_ghcb() needs to run with IRQs disabled because it is using
         * a per-CPU GHCB.
         */
        local_irq_save(flags);

        ghcb = __sev_get_ghcb(&state);
        if (!ghcb) {
                ret = -EIO;
                goto e_restore_irq;
        }

        vc_ghcb_invalidate(ghcb);

        if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
                ghcb_set_rax(ghcb, input->data_gpa);
                ghcb_set_rbx(ghcb, input->data_npages);
        }

        ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
        if (ret)
                goto e_put;

        rio->exitinfo2 = ghcb->save.sw_exit_info_2;
        switch (rio->exitinfo2) {
        case 0:
                break;

        case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
                ret = -EAGAIN;
                break;

        case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
                /* The number of expected pages is returned in RBX */
                if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
                        input->data_npages = ghcb_get_rbx(ghcb);
                        ret = -ENOSPC;
                        break;
                }
                fallthrough;
        default:
                ret = -EIO;
                break;
        }

e_put:
        __sev_put_ghcb(&state);
e_restore_irq:
        local_irq_restore(flags);

        return ret;
}
EXPORT_SYMBOL_GPL(snp_issue_guest_request);
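/*
 * Hypothetical caller sketch for the exported helper above (e.g. a guest
 * messaging driver), assuming the encrypted request/response buffers are
 * already prepared and req_paddr/resp_paddr hold their physical addresses:
 *
 *	struct snp_guest_request_ioctl rio = {};
 *	struct snp_req_data input = {
 *		.req_gpa  = req_paddr,
 *		.resp_gpa = resp_paddr,
 *	};
 *	int ret;
 *
 *	ret = snp_issue_guest_request(SVM_VMGEXIT_GUEST_REQUEST, &input, &rio);
 *	if (ret == -EAGAIN)
 *		// hypervisor reported busy, retry later
 *	else if (ret)
 *		// inspect rio.exitinfo2 for the VMM/firmware error code
 *
 * For SVM_VMGEXIT_EXT_GUEST_REQUEST, .data_gpa and .data_npages must also be
 * filled in; on -ENOSPC, .data_npages is updated with the number of pages the
 * hypervisor expects.
 */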
static struct platform_device sev_guest_device = {
        .name = "sev-guest",
        .id   = -1,
};

static int __init snp_init_platform_device(void)
{
        struct sev_guest_platform_data data;
        u64 gpa;

        if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
                return -ENODEV;

        gpa = get_secrets_page();
        if (!gpa)
                return -ENODEV;

        data.secrets_gpa = gpa;
        if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
                return -ENODEV;

        if (platform_device_register(&sev_guest_device))
                return -ENODEV;

        pr_info("SNP guest platform device initialized.\n");
        return 0;
}
device_initcall(snp_init_platform_device);