/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_VMCS12_H
#define __KVM_X86_VMX_VMCS12_H

#include <linux/build_bug.h>

#include "vmcs.h"

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 *
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 *
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 *
 * IMPORTANT: Changing the layout of existing fields in this structure
 * will break save/restore compatibility with older kvm releases. When
 * adding new fields, either use space in the reserved padding* arrays
 * or add the new fields to the end of the structure.
 */
/*
 * Fields the Intel SDM declares "natural width" are stored as u64 so the
 * layout is identical on 32-bit and 64-bit hosts (see comment below).
 */
typedef u64 natural_width;

struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	struct vmcs_hdr hdr;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7];   /* room for future expansion */

	/* 64-bit fields */
	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 posted_intr_desc_addr;
	u64 ept_pointer;
	u64 eoi_exit_bitmap0;
	u64 eoi_exit_bitmap1;
	u64 eoi_exit_bitmap2;
	u64 eoi_exit_bitmap3;
	u64 xss_exit_bitmap;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 guest_bndcfgs;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 vmread_bitmap;
	u64 vmwrite_bitmap;
	u64 vm_function_control;
	u64 eptp_list_address;
	u64 pml_address;
	u64 encls_exiting_bitmap;
	u64 tsc_multiplier;
	u64 padding64[1]; /* room for future expansion */

	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width dead_space[4]; /* Last remnants of cr3_target_value[0-3]. */
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */

	/* 32-bit fields */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 vmx_preemption_timer_value;
	u32 padding32[7]; /* room for future expansion */

	/* 16-bit fields */
	u16 virtual_processor_id;
	u16 posted_intr_nv;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 guest_intr_status;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
	u16 guest_pml_index;
};
/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 *
 * IMPORTANT: Changing this value will break save/restore compatibility with
 * older kvm releases.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) are used by the
 * current implementation, 4K are reserved to avoid future complications and
 * to preserve userspace ABI.
 */
#define VMCS12_SIZE KVM_STATE_NESTED_VMX_VMCS_SIZE

/*
 * For save/restore compatibility, the vmcs12 field offsets must not change.
 */
/* Build-time assertion that @field sits at byte offset @loc in struct vmcs12. */
#define CHECK_OFFSET(field, loc) \
	BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \
			 "Offset of " #field " in struct vmcs12 has changed.")

/*
 * Verify, entirely at compile time, that every vmcs12 field still sits at
 * its ABI-frozen byte offset.  The function is never meant to be called;
 * merely compiling it triggers the BUILD_BUG_ON_MSG checks.  The frozen
 * values below must never be edited (see the layout comment on the struct).
 */
static inline void vmx_check_vmcs12_offsets(void)
{
	CHECK_OFFSET(hdr, 0);
	CHECK_OFFSET(abort, 4);
	CHECK_OFFSET(launch_state, 8);
	CHECK_OFFSET(io_bitmap_a, 40);
	CHECK_OFFSET(io_bitmap_b, 48);
	CHECK_OFFSET(msr_bitmap, 56);
	CHECK_OFFSET(vm_exit_msr_store_addr, 64);
	CHECK_OFFSET(vm_exit_msr_load_addr, 72);
	CHECK_OFFSET(vm_entry_msr_load_addr, 80);
	CHECK_OFFSET(tsc_offset, 88);
	CHECK_OFFSET(virtual_apic_page_addr, 96);
	CHECK_OFFSET(apic_access_addr, 104);
	CHECK_OFFSET(posted_intr_desc_addr, 112);
	CHECK_OFFSET(ept_pointer, 120);
	CHECK_OFFSET(eoi_exit_bitmap0, 128);
	CHECK_OFFSET(eoi_exit_bitmap1, 136);
	CHECK_OFFSET(eoi_exit_bitmap2, 144);
	CHECK_OFFSET(eoi_exit_bitmap3, 152);
	CHECK_OFFSET(xss_exit_bitmap, 160);
	CHECK_OFFSET(guest_physical_address, 168);
	CHECK_OFFSET(vmcs_link_pointer, 176);
	CHECK_OFFSET(guest_ia32_debugctl, 184);
	CHECK_OFFSET(guest_ia32_pat, 192);
	CHECK_OFFSET(guest_ia32_efer, 200);
	CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
	CHECK_OFFSET(guest_pdptr0, 216);
	CHECK_OFFSET(guest_pdptr1, 224);
	CHECK_OFFSET(guest_pdptr2, 232);
	CHECK_OFFSET(guest_pdptr3, 240);
	CHECK_OFFSET(guest_bndcfgs, 248);
	CHECK_OFFSET(host_ia32_pat, 256);
	CHECK_OFFSET(host_ia32_efer, 264);
	CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
	CHECK_OFFSET(vmread_bitmap, 280);
	CHECK_OFFSET(vmwrite_bitmap, 288);
	CHECK_OFFSET(vm_function_control, 296);
	CHECK_OFFSET(eptp_list_address, 304);
	CHECK_OFFSET(pml_address, 312);
	CHECK_OFFSET(encls_exiting_bitmap, 320);
	CHECK_OFFSET(tsc_multiplier, 328);
	CHECK_OFFSET(cr0_guest_host_mask, 344);
	CHECK_OFFSET(cr4_guest_host_mask, 352);
	CHECK_OFFSET(cr0_read_shadow, 360);
	CHECK_OFFSET(cr4_read_shadow, 368);
	CHECK_OFFSET(dead_space, 376);
	CHECK_OFFSET(exit_qualification, 408);
	CHECK_OFFSET(guest_linear_address, 416);
	CHECK_OFFSET(guest_cr0, 424);
	CHECK_OFFSET(guest_cr3, 432);
	CHECK_OFFSET(guest_cr4, 440);
	CHECK_OFFSET(guest_es_base, 448);
	CHECK_OFFSET(guest_cs_base, 456);
	CHECK_OFFSET(guest_ss_base, 464);
	CHECK_OFFSET(guest_ds_base, 472);
	CHECK_OFFSET(guest_fs_base, 480);
	CHECK_OFFSET(guest_gs_base, 488);
	CHECK_OFFSET(guest_ldtr_base, 496);
	CHECK_OFFSET(guest_tr_base, 504);
	CHECK_OFFSET(guest_gdtr_base, 512);
	CHECK_OFFSET(guest_idtr_base, 520);
	CHECK_OFFSET(guest_dr7, 528);
	CHECK_OFFSET(guest_rsp, 536);
	CHECK_OFFSET(guest_rip, 544);
	CHECK_OFFSET(guest_rflags, 552);
	CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
	CHECK_OFFSET(guest_sysenter_esp, 568);
	CHECK_OFFSET(guest_sysenter_eip, 576);
	CHECK_OFFSET(host_cr0, 584);
	CHECK_OFFSET(host_cr3, 592);
	CHECK_OFFSET(host_cr4, 600);
	CHECK_OFFSET(host_fs_base, 608);
	CHECK_OFFSET(host_gs_base, 616);
	CHECK_OFFSET(host_tr_base, 624);
	CHECK_OFFSET(host_gdtr_base, 632);
	CHECK_OFFSET(host_idtr_base, 640);
	CHECK_OFFSET(host_ia32_sysenter_esp, 648);
	CHECK_OFFSET(host_ia32_sysenter_eip, 656);
	CHECK_OFFSET(host_rsp, 664);
	CHECK_OFFSET(host_rip, 672);
	CHECK_OFFSET(pin_based_vm_exec_control, 744);
	CHECK_OFFSET(cpu_based_vm_exec_control, 748);
	CHECK_OFFSET(exception_bitmap, 752);
	CHECK_OFFSET(page_fault_error_code_mask, 756);
	CHECK_OFFSET(page_fault_error_code_match, 760);
	CHECK_OFFSET(cr3_target_count, 764);
	CHECK_OFFSET(vm_exit_controls, 768);
	CHECK_OFFSET(vm_exit_msr_store_count, 772);
	CHECK_OFFSET(vm_exit_msr_load_count, 776);
	CHECK_OFFSET(vm_entry_controls, 780);
	CHECK_OFFSET(vm_entry_msr_load_count, 784);
	CHECK_OFFSET(vm_entry_intr_info_field, 788);
	CHECK_OFFSET(vm_entry_exception_error_code, 792);
	CHECK_OFFSET(vm_entry_instruction_len, 796);
	CHECK_OFFSET(tpr_threshold, 800);
	CHECK_OFFSET(secondary_vm_exec_control, 804);
	CHECK_OFFSET(vm_instruction_error, 808);
	CHECK_OFFSET(vm_exit_reason, 812);
	CHECK_OFFSET(vm_exit_intr_info, 816);
	CHECK_OFFSET(vm_exit_intr_error_code, 820);
	CHECK_OFFSET(idt_vectoring_info_field, 824);
	CHECK_OFFSET(idt_vectoring_error_code, 828);
	CHECK_OFFSET(vm_exit_instruction_len, 832);
	CHECK_OFFSET(vmx_instruction_info, 836);
	CHECK_OFFSET(guest_es_limit, 840);
	CHECK_OFFSET(guest_cs_limit, 844);
	CHECK_OFFSET(guest_ss_limit, 848);
	CHECK_OFFSET(guest_ds_limit, 852);
	CHECK_OFFSET(guest_fs_limit, 856);
	CHECK_OFFSET(guest_gs_limit, 860);
	CHECK_OFFSET(guest_ldtr_limit, 864);
	CHECK_OFFSET(guest_tr_limit, 868);
	CHECK_OFFSET(guest_gdtr_limit, 872);
	CHECK_OFFSET(guest_idtr_limit, 876);
	CHECK_OFFSET(guest_es_ar_bytes, 880);
	CHECK_OFFSET(guest_cs_ar_bytes, 884);
	CHECK_OFFSET(guest_ss_ar_bytes, 888);
	CHECK_OFFSET(guest_ds_ar_bytes, 892);
	CHECK_OFFSET(guest_fs_ar_bytes, 896);
	CHECK_OFFSET(guest_gs_ar_bytes, 900);
	CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
	CHECK_OFFSET(guest_tr_ar_bytes, 908);
	CHECK_OFFSET(guest_interruptibility_info, 912);
	CHECK_OFFSET(guest_activity_state, 916);
	CHECK_OFFSET(guest_sysenter_cs, 920);
	CHECK_OFFSET(host_ia32_sysenter_cs, 924);
	CHECK_OFFSET(vmx_preemption_timer_value, 928);
	CHECK_OFFSET(virtual_processor_id, 960);
	CHECK_OFFSET(posted_intr_nv, 962);
	CHECK_OFFSET(guest_es_selector, 964);
	CHECK_OFFSET(guest_cs_selector, 966);
	CHECK_OFFSET(guest_ss_selector, 968);
	CHECK_OFFSET(guest_ds_selector, 970);
	CHECK_OFFSET(guest_fs_selector, 972);
	CHECK_OFFSET(guest_gs_selector, 974);
	CHECK_OFFSET(guest_ldtr_selector, 976);
	CHECK_OFFSET(guest_tr_selector, 978);
	CHECK_OFFSET(guest_intr_status, 980);
	CHECK_OFFSET(host_es_selector, 982);
	CHECK_OFFSET(host_cs_selector, 984);
	CHECK_OFFSET(host_ss_selector, 986);
	CHECK_OFFSET(host_ds_selector, 988);
	CHECK_OFFSET(host_fs_selector, 990);
	CHECK_OFFSET(host_gs_selector, 992);
	CHECK_OFFSET(host_tr_selector, 994);
	CHECK_OFFSET(guest_pml_index, 996);
}
extern const unsigned short vmcs12_field_offsets[];
extern const unsigned int nr_vmcs12_fields;
  356. static inline short get_vmcs12_field_offset(unsigned long field)
  357. {
  358. unsigned short offset;
  359. unsigned int index;
  360. if (field >> 15)
  361. return -ENOENT;
  362. index = ROL16(field, 6);
  363. if (index >= nr_vmcs12_fields)
  364. return -ENOENT;
  365. index = array_index_nospec(index, nr_vmcs12_fields);
  366. offset = vmcs12_field_offsets[index];
  367. if (offset == 0)
  368. return -ENOENT;
  369. return offset;
  370. }
  371. static inline u64 vmcs12_read_any(struct vmcs12 *vmcs12, unsigned long field,
  372. u16 offset)
  373. {
  374. char *p = (char *)vmcs12 + offset;
  375. switch (vmcs_field_width(field)) {
  376. case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
  377. return *((natural_width *)p);
  378. case VMCS_FIELD_WIDTH_U16:
  379. return *((u16 *)p);
  380. case VMCS_FIELD_WIDTH_U32:
  381. return *((u32 *)p);
  382. case VMCS_FIELD_WIDTH_U64:
  383. return *((u64 *)p);
  384. default:
  385. WARN_ON_ONCE(1);
  386. return -1;
  387. }
  388. }
  389. static inline void vmcs12_write_any(struct vmcs12 *vmcs12, unsigned long field,
  390. u16 offset, u64 field_value)
  391. {
  392. char *p = (char *)vmcs12 + offset;
  393. switch (vmcs_field_width(field)) {
  394. case VMCS_FIELD_WIDTH_U16:
  395. *(u16 *)p = field_value;
  396. break;
  397. case VMCS_FIELD_WIDTH_U32:
  398. *(u32 *)p = field_value;
  399. break;
  400. case VMCS_FIELD_WIDTH_U64:
  401. *(u64 *)p = field_value;
  402. break;
  403. case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
  404. *(natural_width *)p = field_value;
  405. break;
  406. default:
  407. WARN_ON_ONCE(1);
  408. break;
  409. }
  410. }
#endif /* __KVM_X86_VMX_VMCS12_H */