/* svm.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __SVM_H
  3. #define __SVM_H
  4. #include <uapi/asm/svm.h>
  5. #include <uapi/asm/kvm.h>
  6. #include <asm/hyperv-tlfs.h>
/*
 * 32-bit intercept words in the VMCB Control Area, starting
 * at Byte offset 000h.  Each enumerator indexes one u32 in
 * vmcb_control_area.intercepts[].
 */
enum intercept_words {
	INTERCEPT_CR = 0,	/* word 0: CR read/write intercepts */
	INTERCEPT_DR,		/* word 1: DR read/write intercepts */
	INTERCEPT_EXCEPTION,	/* word 2: per-vector exception intercepts */
	INTERCEPT_WORD3,	/* word 3: instruction/event intercepts */
	INTERCEPT_WORD4,	/* word 4: SVM-instruction intercepts and TRAP_* bits */
	INTERCEPT_WORD5,	/* word 5: INVLPGB/INVPCID/MCOMMIT/TLBSYNC */
	MAX_INTERCEPT,		/* number of intercept words in the VMCB */
};
/*
 * Flat intercept bit numbers: bit N lives in intercepts[N / 32],
 * bit (N % 32), matching the byte offsets noted below.
 */
enum {
	/* Byte offset 000h (word 0) */
	INTERCEPT_CR0_READ = 0,
	INTERCEPT_CR3_READ = 3,
	INTERCEPT_CR4_READ = 4,
	INTERCEPT_CR8_READ = 8,
	INTERCEPT_CR0_WRITE = 16,	/* write intercepts start at bit 16 of the word */
	INTERCEPT_CR3_WRITE = 16 + 3,
	INTERCEPT_CR4_WRITE = 16 + 4,
	INTERCEPT_CR8_WRITE = 16 + 8,
	/* Byte offset 004h (word 1) */
	INTERCEPT_DR0_READ = 32,
	INTERCEPT_DR1_READ,
	INTERCEPT_DR2_READ,
	INTERCEPT_DR3_READ,
	INTERCEPT_DR4_READ,
	INTERCEPT_DR5_READ,
	INTERCEPT_DR6_READ,
	INTERCEPT_DR7_READ,
	INTERCEPT_DR0_WRITE = 48,
	INTERCEPT_DR1_WRITE,
	INTERCEPT_DR2_WRITE,
	INTERCEPT_DR3_WRITE,
	INTERCEPT_DR4_WRITE,
	INTERCEPT_DR5_WRITE,
	INTERCEPT_DR6_WRITE,
	INTERCEPT_DR7_WRITE,
	/* Byte offset 008h (word 2) */
	INTERCEPT_EXCEPTION_OFFSET = 64,	/* base; add the exception vector number */
	/* Byte offset 00Ch (word 3) */
	INTERCEPT_INTR = 96,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
	INTERCEPT_VINTR,
	INTERCEPT_SELECTIVE_CR0,
	INTERCEPT_STORE_IDTR,
	INTERCEPT_STORE_GDTR,
	INTERCEPT_STORE_LDTR,
	INTERCEPT_STORE_TR,
	INTERCEPT_LOAD_IDTR,
	INTERCEPT_LOAD_GDTR,
	INTERCEPT_LOAD_LDTR,
	INTERCEPT_LOAD_TR,
	INTERCEPT_RDTSC,
	INTERCEPT_RDPMC,
	INTERCEPT_PUSHF,
	INTERCEPT_POPF,
	INTERCEPT_CPUID,
	INTERCEPT_RSM,
	INTERCEPT_IRET,
	INTERCEPT_INTn,
	INTERCEPT_INVD,
	INTERCEPT_PAUSE,
	INTERCEPT_HLT,
	INTERCEPT_INVLPG,
	INTERCEPT_INVLPGA,
	INTERCEPT_IOIO_PROT,
	INTERCEPT_MSR_PROT,
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	/* Byte offset 010h (word 4) */
	INTERCEPT_VMRUN = 128,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
	INTERCEPT_STGI,
	INTERCEPT_CLGI,
	INTERCEPT_SKINIT,
	INTERCEPT_RDTSCP,
	INTERCEPT_ICEBP,
	INTERCEPT_WBINVD,
	INTERCEPT_MONITOR,
	INTERCEPT_MWAIT,
	INTERCEPT_MWAIT_COND,
	INTERCEPT_XSETBV,
	INTERCEPT_RDPRU,
	/* TRAP_*: register-write trap bits; NOTE(review): semantics per APM — confirm */
	TRAP_EFER_WRITE,
	TRAP_CR0_WRITE,
	TRAP_CR1_WRITE,
	TRAP_CR2_WRITE,
	TRAP_CR3_WRITE,
	TRAP_CR4_WRITE,
	TRAP_CR5_WRITE,
	TRAP_CR6_WRITE,
	TRAP_CR7_WRITE,
	TRAP_CR8_WRITE,
	/* Byte offset 014h (word 5) */
	INTERCEPT_INVLPGB = 160,
	INTERCEPT_INVLPGB_ILLEGAL,
	INTERCEPT_INVPCID,
	INTERCEPT_MCOMMIT,
	INTERCEPT_TLBSYNC,
};
/*
 * VMCB Control Area.  The layout is hardware-defined (hence packed);
 * fields must not be reordered or resized.  Total size is checked
 * against EXPECTED_VMCB_CONTROL_AREA_SIZE at build time.
 */
struct __attribute__ ((__packed__)) vmcb_control_area {
	u32 intercepts[MAX_INTERCEPT];		/* intercept bit vectors, byte offset 000h */
	u32 reserved_1[15 - MAX_INTERCEPT];	/* pad the intercept region to 15 words */
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;	/* physical base of the I/O permission map */
	u64 msrpm_base_pa;	/* physical base of the MSR permission map */
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;		/* a TLB_CONTROL_* value */
	u8 reserved_2[3];
	u32 int_ctl;		/* virtual interrupt control (V_*/AVIC_ENABLE bits) */
	u32 int_vector;
	u32 int_state;
	u8 reserved_3[4];
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;	/* SVM_EXITINTINFO_* encoding */
	u32 exit_int_info_err;
	u64 nested_ctl;		/* SVM_NESTED_CTL_* flags */
	u64 avic_vapic_bar;
	u64 ghcb_gpa;		/* guest physical address of the GHCB page */
	u32 event_inj;		/* SVM_EVTINJ_* encoding */
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;		/* LBR_CTL_ENABLE / VIRTUAL_VMLOAD_VMSAVE bits */
	u32 clean;
	u32 reserved_5;
	u64 next_rip;
	u8 insn_len;
	u8 insn_bytes[15];
	u64 avic_backing_page;	/* Offset 0xe0 */
	u8 reserved_6[8];	/* Offset 0xe8 */
	u64 avic_logical_id;	/* Offset 0xf0 */
	u64 avic_physical_id;	/* Offset 0xf8 */
	u8 reserved_7[8];
	u64 vmsa_pa;		/* Used for an SEV-ES guest */
	u8 reserved_8[720];
	/*
	 * Offset 0x3e0, 32 bytes reserved
	 * for use by hypervisor/software.
	 */
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};
  164. #define TLB_CONTROL_DO_NOTHING 0
  165. #define TLB_CONTROL_FLUSH_ALL_ASID 1
  166. #define TLB_CONTROL_FLUSH_ASID 3
  167. #define TLB_CONTROL_FLUSH_ASID_LOCAL 7
  168. #define V_TPR_MASK 0x0f
  169. #define V_IRQ_SHIFT 8
  170. #define V_IRQ_MASK (1 << V_IRQ_SHIFT)
  171. #define V_GIF_SHIFT 9
  172. #define V_GIF_MASK (1 << V_GIF_SHIFT)
  173. #define V_INTR_PRIO_SHIFT 16
  174. #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
  175. #define V_IGN_TPR_SHIFT 20
  176. #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
  177. #define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
  178. #define V_INTR_MASKING_SHIFT 24
  179. #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
  180. #define V_GIF_ENABLE_SHIFT 25
  181. #define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT)
  182. #define AVIC_ENABLE_SHIFT 31
  183. #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
  184. #define X2APIC_MODE_SHIFT 30
  185. #define X2APIC_MODE_MASK (1 << X2APIC_MODE_SHIFT)
/* Bits of vmcb_control_area.virt_ext */
#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)

/* Guest interruptibility bits */
#define SVM_INTERRUPT_SHADOW_MASK BIT_ULL(0)
#define SVM_GUEST_INTERRUPT_MASK BIT_ULL(1)

/* Decode of IN/OUT intercept information (string/rep/operand sizes) */
#define SVM_IOIO_STR_SHIFT 2
#define SVM_IOIO_REP_SHIFT 3
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_ASIZE_SHIFT 7

#define SVM_IOIO_TYPE_MASK 1
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

/* VM_CR MSR bits */
#define SVM_VM_CR_VALID_MASK 0x001fULL
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL

/* Bits of vmcb_control_area.nested_ctl */
#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)

/* TSC ratio MSR: valid range and reset value */
#define SVM_TSC_RATIO_RSVD 0xffffff0000000000ULL
#define SVM_TSC_RATIO_MIN 0x0000000000000001ULL
#define SVM_TSC_RATIO_MAX 0x000000ffffffffffULL
#define SVM_TSC_RATIO_DEFAULT 0x0100000000ULL
  209. /* AVIC */
  210. #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFFULL)
  211. #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
  212. #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
  213. #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
  214. #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
  215. #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
  216. #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
  217. #define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFFULL)
  218. #define AVIC_DOORBELL_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
  219. #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
  220. #define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
  221. #define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
  222. #define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
/* Reasons AVIC could not deliver an IPI (reported on an AVIC exit). */
enum avic_ipi_failure_cause {
	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
	AVIC_IPI_FAILURE_INVALID_TARGET,
	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
	AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
};
  230. #define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
  231. /*
  232. * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
  233. * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
  234. */
  235. #define AVIC_MAX_PHYSICAL_ID 0XFEULL
  236. /*
  237. * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
  238. */
  239. #define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL
  240. static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
  241. static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
  242. #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
  243. #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
/* One segment register as cached in the VMCB save area. */
struct vmcb_seg {
	u16 selector;
	u16 attrib;	/* packed descriptor attribute bits (SVM_SELECTOR_*) */
	u32 limit;
	u64 base;
} __packed;
/*
 * Save area definition for legacy and SEV-MEM guests.
 * Hardware-defined layout (__packed); size is checked against
 * EXPECTED_VMCB_SAVE_AREA_SIZE at build time.
 */
struct vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u8 reserved_1[42];
	u8 vmpl;
	u8 cpl;
	u8 reserved_2[4];
	u64 efer;
	u8 reserved_3[112];
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u8 reserved_4[88];
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_5[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
	u8 reserved_6[72];
	u32 spec_ctrl;		/* Guest version of SPEC_CTRL at 0x2E0 */
} __packed;
/*
 * Save area definition for SEV-ES and SEV-SNP guests (the VMSA).
 * Hardware-defined layout (__packed); size is checked against
 * EXPECTED_SEV_ES_SAVE_AREA_SIZE at build time.  Unlike the legacy
 * save area it also holds GPRs, debug registers and FPU state.
 */
struct sev_es_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u64 vmpl0_ssp;		/* per-VMPL shadow stack pointers */
	u64 vmpl1_ssp;
	u64 vmpl2_ssp;
	u64 vmpl3_ssp;
	u64 u_cet;
	u8 reserved_1[2];
	u8 vmpl;
	u8 cpl;
	u8 reserved_2[4];
	u64 efer;
	u8 reserved_3[104];
	u64 xss;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u64 dr0;
	u64 dr1;
	u64 dr2;
	u64 dr3;
	u64 dr0_addr_mask;
	u64 dr1_addr_mask;
	u64 dr2_addr_mask;
	u64 dr3_addr_mask;
	u8 reserved_4[24];
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_5[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
	u8 reserved_7[80];
	u32 pkru;
	u8 reserved_8[20];
	u64 reserved_9;		/* rax already available at 0x01f8 */
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 reserved_10;	/* rsp already available at 0x01d8 */
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u8 reserved_11[16];
	u64 guest_exit_info_1;
	u64 guest_exit_info_2;
	u64 guest_exit_int_info;
	u64 guest_nrip;
	u64 sev_features;
	u64 vintr_ctrl;
	u64 guest_exit_code;
	u64 virtual_tom;
	u64 tlb_id;
	u64 pcpu_id;
	u64 event_inj;
	u64 xcr0;
	u8 reserved_12[16];

	/* Floating point area */
	u64 x87_dp;
	u32 mxcsr;
	u16 x87_ftw;
	u16 x87_fsw;
	u16 x87_fcw;
	u16 x87_fop;
	u16 x87_ds;
	u16 x87_cs;
	u64 x87_rip;
	u8 fpreg_x87[80];
	u8 fpreg_xmm[256];
	u8 fpreg_ymm[256];
} __packed;
/*
 * Save area embedded in the GHCB page.  The reserved_* gaps place each
 * field at its GHCB-specified offset; size is checked against
 * EXPECTED_GHCB_SAVE_AREA_SIZE at build time.
 */
struct ghcb_save_area {
	u8 reserved_1[203];
	u8 cpl;
	u8 reserved_2[116];
	u64 xss;
	u8 reserved_3[24];
	u64 dr7;
	u8 reserved_4[16];
	u64 rip;
	u8 reserved_5[88];
	u64 rsp;
	u8 reserved_6[24];
	u64 rax;
	u8 reserved_7[264];
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u8 reserved_8[8];
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u8 reserved_9[16];
	u64 sw_exit_code;
	u64 sw_exit_info_1;
	u64 sw_exit_info_2;
	u64 sw_scratch;
	u8 reserved_10[56];
	u64 xcr0;
	u8 valid_bitmap[16];	/* one bit per 8-byte slot; see GHCB_BITMAP_IDX() */
	u64 x87_state_gpa;
} __packed;
#define GHCB_SHARED_BUF_SIZE 2032

/*
 * Guest-Hypervisor Communication Block: a shared page used by SEV-ES
 * guests; total size is checked against EXPECTED_GHCB_SIZE (PAGE_SIZE).
 */
struct ghcb {
	struct ghcb_save_area save;
	u8 reserved_save[2048 - sizeof(struct ghcb_save_area)];	/* pad save area to 2048 bytes */

	u8 shared_buffer[GHCB_SHARED_BUF_SIZE];

	u8 reserved_1[10];
	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
	u32 ghcb_usage;
} __packed;
/* Expected sizes (bytes) of the hardware-defined structures above. */
#define EXPECTED_VMCB_SAVE_AREA_SIZE 740
#define EXPECTED_GHCB_SAVE_AREA_SIZE 1032
#define EXPECTED_SEV_ES_SAVE_AREA_SIZE 1648
#define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
#define EXPECTED_GHCB_SIZE PAGE_SIZE

/*
 * Never called; exists only so the BUILD_BUG_ON() checks below are
 * evaluated at compile time, catching any accidental layout change.
 */
static inline void __unused_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct ghcb_save_area) != EXPECTED_GHCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct sev_es_save_area) != EXPECTED_SEV_ES_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
}
/* A complete VMCB: control area followed by the save area. */
struct vmcb {
	struct vmcb_control_area control;
	struct vmcb_save_area save;
} __packed;
/* CPUID leaf reporting SVM features */
#define SVM_CPUID_FUNC 0x8000000a

#define SVM_VM_CR_SVM_DISABLE 4

/* Segment attribute bits, as packed in vmcb_seg.attrib */
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

/* Type-field bits within SVM_SELECTOR_TYPE_MASK */
#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)
  493. #define SVM_EVTINJ_VEC_MASK 0xff
  494. #define SVM_EVTINJ_TYPE_SHIFT 8
  495. #define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
  496. #define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
  497. #define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
  498. #define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
  499. #define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
  500. #define SVM_EVTINJ_VALID (1 << 31)
  501. #define SVM_EVTINJ_VALID_ERR (1 << 11)
  502. #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
  503. #define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
  504. #define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
  505. #define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
  506. #define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
  507. #define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
  508. #define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
  509. #define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
  510. #define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
  511. #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
  512. #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
  513. #define SVM_EXITINFO_REG_MASK 0x0F
/* CR0 bits monitored by the selective-CR0-write intercept */
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
/* GHCB Accessor functions */

/*
 * Index of @field's bit in ghcb_save_area.valid_bitmap: one bit per
 * 8-byte slot of the save area.
 */
#define GHCB_BITMAP_IDX(field)							\
	(offsetof(struct ghcb_save_area, field) / sizeof(u64))

/*
 * Generates, for the given save-area @field:
 *   ghcb_<field>_is_valid()     - test the field's bit in valid_bitmap
 *   ghcb_get_<field>()          - read the field unconditionally
 *   ghcb_get_<field>_if_valid() - read the field, or 0 if not marked valid
 *   ghcb_set_<field>()          - write the field and mark it valid
 */
#define DEFINE_GHCB_ACCESSORS(field)						\
	static __always_inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&ghcb->save.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 ghcb_get_##field(struct ghcb *ghcb)		\
	{									\
		return ghcb->save.field;					\
	}									\
										\
	static __always_inline u64 ghcb_get_##field##_if_valid(struct ghcb *ghcb) \
	{									\
		return ghcb_##field##_is_valid(ghcb) ? ghcb->save.field : 0;	\
	}									\
										\
	static __always_inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
	{									\
		__set_bit(GHCB_BITMAP_IDX(field),				\
			  (unsigned long *)&ghcb->save.valid_bitmap);		\
										\
		ghcb->save.field = value;					\
	}
/* Instantiate the accessors for each exchanged GHCB save-area field. */
DEFINE_GHCB_ACCESSORS(cpl)
DEFINE_GHCB_ACCESSORS(rip)
DEFINE_GHCB_ACCESSORS(rsp)
DEFINE_GHCB_ACCESSORS(rax)
DEFINE_GHCB_ACCESSORS(rcx)
DEFINE_GHCB_ACCESSORS(rdx)
DEFINE_GHCB_ACCESSORS(rbx)
DEFINE_GHCB_ACCESSORS(rbp)
DEFINE_GHCB_ACCESSORS(rsi)
DEFINE_GHCB_ACCESSORS(rdi)
DEFINE_GHCB_ACCESSORS(r8)
DEFINE_GHCB_ACCESSORS(r9)
DEFINE_GHCB_ACCESSORS(r10)
DEFINE_GHCB_ACCESSORS(r11)
DEFINE_GHCB_ACCESSORS(r12)
DEFINE_GHCB_ACCESSORS(r13)
DEFINE_GHCB_ACCESSORS(r14)
DEFINE_GHCB_ACCESSORS(r15)
DEFINE_GHCB_ACCESSORS(sw_exit_code)
DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_GHCB_ACCESSORS(sw_scratch)
DEFINE_GHCB_ACCESSORS(xcr0)

#endif /* __SVM_H */