Merge tag 'kvm-3.7-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Avi Kivity:
 "Highlights of the changes for this release include support for vfio
  level triggered interrupts, improved big real mode support on older
  Intels, a streamlined guest page table walker, guest APIC speedups,
  PIO optimizations, better overcommit handling, and read-only memory."

* tag 'kvm-3.7-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (138 commits)
  KVM: s390: Fix vcpu_load handling in interrupt code
  KVM: x86: Fix guest debug across vcpu INIT reset
  KVM: Add resampling irqfds for level triggered interrupts
  KVM: optimize apic interrupt delivery
  KVM: MMU: Eliminate pointless temporary 'ac'
  KVM: MMU: Avoid access/dirty update loop if all is well
  KVM: MMU: Eliminate eperm temporary
  KVM: MMU: Optimize is_last_gpte()
  KVM: MMU: Simplify walk_addr_generic() loop
  KVM: MMU: Optimize pte permission checks
  KVM: MMU: Update accessed and dirty bits after guest pagetable walk
  KVM: MMU: Move gpte_access() out of paging_tmpl.h
  KVM: MMU: Optimize gpte_access() slightly
  KVM: MMU: Push clean gpte write protection out of gpte_access()
  KVM: clarify kvmclock documentation
  KVM: make processes waiting on vcpu mutex killable
  KVM: SVM: Make use of asm.h
  KVM: VMX: Make use of asm.h
  KVM: VMX: Make lto-friendly
  KVM: x86: lapic: Clean up find_highest_vector() and count_vectors()
  ...

Conflicts:
	arch/s390/include/asm/processor.h
	arch/x86/kvm/i8259.c
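Of the highlights, the resampling irqfd is the new userspace-visible ABI: it extends KVM_IRQFD so a level-triggered line is deasserted on guest EOI and userspace is told to re-check the device. A minimal sketch of how a VMM might use it, assuming the uapi names added by this series (KVM_IRQFD_FLAG_RESAMPLE and the resamplefd field of struct kvm_irqfd); vm_fd is a hypothetical already-created VM descriptor and error handling is omitted:

	#include <linux/kvm.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>

	/* Assert the GSI by writing 1 to irqfd.fd; KVM signals resamplefd
	 * when the guest EOIs the level-triggered interrupt, so the VMM
	 * can re-check the device and re-assert if the line is still high. */
	static int setup_resample_irqfd(int vm_fd, unsigned int gsi)
	{
		struct kvm_irqfd irqfd = {
			.fd = eventfd(0, 0),         /* trigger eventfd */
			.resamplefd = eventfd(0, 0), /* notified on guest EOI */
			.gsi = gsi,
			.flags = KVM_IRQFD_FLAG_RESAMPLE,
		};

		return ioctl(vm_fd, KVM_IRQFD, &irqfd);
	}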
@@ -127,6 +127,8 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
+extern const ulong vmx_return;
+
 #define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
@@ -405,16 +407,16 @@ struct vcpu_vmx {
 	struct {
 		int vm86_active;
 		ulong save_rflags;
+		struct kvm_segment segs[8];
+	} rmode;
+	struct {
+		u32 bitmask; /* 4 bits per segment (1 bit per field) */
 		struct kvm_save_segment {
 			u16 selector;
 			unsigned long base;
 			u32 limit;
 			u32 ar;
-		} tr, es, ds, fs, gs;
-	} rmode;
-	struct {
-		u32 bitmask; /* 4 bits per segment (1 bit per field) */
-		struct kvm_save_segment seg[8];
+		} seg[8];
 	} segment_cache;
 	int vpid;
 	bool emulation_required;
@@ -450,7 +452,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 #define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
 				[number##_HIGH] = VMCS12_OFFSET(name)+4
 
-static unsigned short vmcs_field_to_offset_table[] = {
+static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
 	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
 	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
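This hunk, like the kvm_vmx_segment_fields, seg_setup and kvm_vmx_exit_handlers hunks further down, only adds const so the lookup table can be placed in .rodata. A standalone sketch of the designated-initializer offset-table pattern; struct vmcs12_like and the index values are made up for the example (the kernel indexes by the architectural VMCS field encodings instead):

	#include <stddef.h>
	#include <stdio.h>

	struct vmcs12_like {            /* hypothetical stand-in for struct vmcs12 */
		unsigned short guest_es_selector;
		unsigned short guest_cs_selector;
	};

	#define OFFSET(field) offsetof(struct vmcs12_like, field)

	/* const lets the compiler place the table in read-only memory */
	static const unsigned short field_to_offset_table[] = {
		[0] = OFFSET(guest_es_selector),
		[1] = OFFSET(guest_cs_selector),
	};

	int main(void)
	{
		printf("%d %d\n", field_to_offset_table[0], field_to_offset_table[1]);
		return 0;
	}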
@@ -596,10 +598,9 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
 {
 	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
-	if (is_error_page(page)) {
-		kvm_release_page_clean(page);
+	if (is_error_page(page))
 		return NULL;
-	}
+
 	return page;
 }
 
@@ -667,7 +668,7 @@ static struct vmx_capability {
 		.ar_bytes = GUEST_##seg##_AR_BYTES,	\
 	}
 
-static struct kvm_vmx_segment_field {
+static const struct kvm_vmx_segment_field {
 	unsigned selector;
 	unsigned base;
 	unsigned limit;
@@ -1343,7 +1344,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	guest_efer = vmx->vcpu.arch.efer;
 
 	/*
-	 * NX is emulated; LMA and LME handled by hardware; SCE meaninless
+	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
 	 * outside long mode
 	 */
 	ignore_bits = EFER_NX | EFER_SCE;
@@ -1995,7 +1996,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 #endif
 		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
 		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
-		CPU_BASED_RDPMC_EXITING |
+		CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
 		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	/*
 	 * We can allow some features even when not supported by the
@@ -2291,16 +2292,6 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	}
 }
 
-static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
-{
-	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
-	else
-		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-
-	update_exception_bitmap(vcpu);
-}
-
 static __init int cpu_has_kvm_support(void)
 {
 	return cpu_has_vmx();
@@ -2698,20 +2689,17 @@ static __exit void hardware_unsetup(void)
 	free_kvm_area();
 }
 
-static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
+static void fix_pmode_dataseg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save)
 {
-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	struct kvm_segment tmp = *save;
 
-	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
-		vmcs_write16(sf->selector, save->selector);
-		vmcs_writel(sf->base, save->base);
-		vmcs_write32(sf->limit, save->limit);
-		vmcs_write32(sf->ar_bytes, save->ar);
-	} else {
-		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
-			<< AR_DPL_SHIFT;
-		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
+	if (!(vmcs_readl(sf->base) == tmp.base && tmp.s)) {
+		tmp.base = vmcs_readl(sf->base);
+		tmp.selector = vmcs_read16(sf->selector);
+		tmp.s = 1;
 	}
+	vmx_set_segment(vcpu, &tmp, seg);
 }
 
 static void enter_pmode(struct kvm_vcpu *vcpu)
@@ -2724,10 +2712,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 
 	vmx_segment_cache_clear(vmx);
 
-	vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
-	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
-	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
-	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
+	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
 	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
@@ -2742,10 +2727,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 	if (emulate_invalid_guest_state)
 		return;
 
-	fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
-	fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
-	fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
-	fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
+	fix_pmode_dataseg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
+	fix_pmode_dataseg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
+	fix_pmode_dataseg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
+	fix_pmode_dataseg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
 
 	vmx_segment_cache_clear(vmx);
 
@@ -2773,14 +2758,10 @@ static gva_t rmode_tss_base(struct kvm *kvm)
 	return kvm->arch.tss_addr;
 }
 
-static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
+static void fix_rmode_seg(int seg, struct kvm_segment *save)
 {
-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 
-	save->selector = vmcs_read16(sf->selector);
-	save->base = vmcs_readl(sf->base);
-	save->limit = vmcs_read32(sf->limit);
-	save->ar = vmcs_read32(sf->ar_bytes);
 	vmcs_write16(sf->selector, save->base >> 4);
 	vmcs_write32(sf->base, save->base & 0xffff0);
 	vmcs_write32(sf->limit, 0xffff);
@@ -2800,9 +2781,16 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	if (enable_unrestricted_guest)
 		return;
 
+	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
+	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
+	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
+	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
+	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
+
 	vmx->emulation_required = 1;
 	vmx->rmode.vm86_active = 1;
 
+
 	/*
 	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
 	 * vcpu. Call it here with phys address pointing 16M below 4G.
@@ -2817,14 +2805,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
 	vmx_segment_cache_clear(vmx);
 
-	vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
-	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
 	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
-
-	vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
 	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
-
-	vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
 	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
 	flags = vmcs_readl(GUEST_RFLAGS);
@@ -3117,35 +3099,24 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_save_segment *save;
 	u32 ar;
 
 	if (vmx->rmode.vm86_active
 	    && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
 		|| seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
-		|| seg == VCPU_SREG_GS)
-	    && !emulate_invalid_guest_state) {
-		switch (seg) {
-		case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
-		case VCPU_SREG_ES: save = &vmx->rmode.es; break;
-		case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
-		case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
-		case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
-		default: BUG();
-		}
-		var->selector = save->selector;
-		var->base = save->base;
-		var->limit = save->limit;
-		ar = save->ar;
+		|| seg == VCPU_SREG_GS)) {
+		*var = vmx->rmode.segs[seg];
 		if (seg == VCPU_SREG_TR
 		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
-			goto use_saved_rmode_seg;
+			return;
+		var->base = vmx_read_guest_seg_base(vmx, seg);
+		var->selector = vmx_read_guest_seg_selector(vmx, seg);
+		return;
 	}
 	var->base = vmx_read_guest_seg_base(vmx, seg);
 	var->limit = vmx_read_guest_seg_limit(vmx, seg);
 	var->selector = vmx_read_guest_seg_selector(vmx, seg);
 	ar = vmx_read_guest_seg_ar(vmx, seg);
-use_saved_rmode_seg:
 	if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
 		ar = 0;
 	var->type = ar & 15;
@@ -3227,23 +3198,21 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	u32 ar;
 
 	vmx_segment_cache_clear(vmx);
 
 	if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
 		vmcs_write16(sf->selector, var->selector);
-		vmx->rmode.tr.selector = var->selector;
-		vmx->rmode.tr.base = var->base;
-		vmx->rmode.tr.limit = var->limit;
-		vmx->rmode.tr.ar = vmx_segment_access_rights(var);
+		vmx->rmode.segs[VCPU_SREG_TR] = *var;
 		return;
 	}
 	vmcs_writel(sf->base, var->base);
 	vmcs_write32(sf->limit, var->limit);
 	vmcs_write16(sf->selector, var->selector);
 	if (vmx->rmode.vm86_active && var->s) {
+		vmx->rmode.segs[seg] = *var;
 		/*
 		 * Hack real-mode segments into vm86 compatibility.
 		 */
@@ -3258,7 +3227,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	 * qemu binaries.
 	 * IA32 arch specifies that at the time of processor reset the
 	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
-	 * is setting it to 0 in the usedland code. This causes invalid guest
+	 * is setting it to 0 in the userland code. This causes invalid guest
 	 * state vmexit when "unrestricted guest" mode is turned on.
 	 * Fix for this setup issue in cpu_reset is being pushed in the qemu
 	 * tree. Newer qemu binaries with that qemu fix would not need this
@@ -3288,16 +3257,10 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 			     vmcs_readl(GUEST_CS_BASE) >> 4);
 		break;
 	case VCPU_SREG_ES:
-		fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
-		break;
 	case VCPU_SREG_DS:
-		fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
-		break;
 	case VCPU_SREG_GS:
-		fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
-		break;
 	case VCPU_SREG_FS:
-		fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+		fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
 		break;
 	case VCPU_SREG_SS:
 		vmcs_write16(GUEST_SS_SELECTOR,
@@ -3351,9 +3314,9 @@ static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
 
 	if (var.base != (var.selector << 4))
 		return false;
-	if (var.limit != 0xffff)
+	if (var.limit < 0xffff)
 		return false;
-	if (ar != 0xf3)
+	if (((ar | (3 << AR_DPL_SHIFT)) & ~(AR_G_MASK | AR_DB_MASK)) != 0xf3)
 		return false;
 
 	return true;
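The two relaxed checks above accept any limit of at least 64K and any DPL, and ignore the granularity and default-size attribute bits, instead of demanding exactly limit 0xffff and attribute byte 0xf3. A sketch of the new attribute predicate with the masks written out; the bit positions follow the VMX segment access-rights layout that the AR_* macros in vmx.c encode:

	/* VMX access rights: bits 0-3 type, bit 4 S, bits 5-6 DPL,
	 * bit 7 P, bit 14 D/B, bit 15 G */
	#define AR_DPL_SHIFT	5
	#define AR_DB_MASK	(1 << 14)
	#define AR_G_MASK	(1 << 15)

	static int rmode_ar_valid(unsigned int ar)
	{
		/* force DPL to 3 and drop G/DB, then require 0xf3:
		 * present, S=1, DPL=3, type = accessed read/write data */
		return ((ar | (3 << AR_DPL_SHIFT)) & ~(AR_G_MASK | AR_DB_MASK)) == 0xf3;
	}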
@@ -3605,7 +3568,7 @@ out:
 
 static void seg_setup(int seg)
 {
-	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 	unsigned int ar;
 
 	vmcs_write16(sf->selector, 0);
@@ -3770,8 +3733,7 @@ static void vmx_set_constant_host_state(void)
 	native_store_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
 
-	asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
-	vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
+	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
 
 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
@@ -4005,8 +3967,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_rip_write(vcpu, 0);
 	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
-	vmcs_writel(GUEST_DR7, 0x400);
-
 	vmcs_writel(GUEST_GDTR_BASE, 0);
 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
 
@@ -4456,7 +4416,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-/* called to set cr0 as approriate for a mov-to-cr0 exit. */
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	if (to_vmx(vcpu)->nested.vmxon &&
@@ -5701,7 +5661,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
-static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[EXIT_REASON_EXCEPTION_NMI] = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
@@ -6229,17 +6189,10 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host);
 }
 
-#ifdef CONFIG_X86_64
-#define R "r"
-#define Q "q"
-#else
-#define R "e"
-#define Q "l"
-#endif
-
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long debugctlmsr;
 
 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -6279,34 +6232,35 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
 	atomic_switch_perf_msrs(vmx);
+	debugctlmsr = get_debugctlmsr();
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */
-		"push %%"R"dx; push %%"R"bp;"
-		"push %%"R"cx \n\t" /* placeholder for guest rcx */
-		"push %%"R"cx \n\t"
-		"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
+		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
+		"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
+		"push %%" _ASM_CX " \n\t"
+		"cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
 		"je 1f \n\t"
-		"mov %%"R"sp, %c[host_rsp](%0) \n\t"
+		"mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
 		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
 		"1: \n\t"
 		/* Reload cr2 if changed */
-		"mov %c[cr2](%0), %%"R"ax \n\t"
-		"mov %%cr2, %%"R"dx \n\t"
-		"cmp %%"R"ax, %%"R"dx \n\t"
+		"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
+		"mov %%cr2, %%" _ASM_DX " \n\t"
+		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
 		"je 2f \n\t"
-		"mov %%"R"ax, %%cr2 \n\t"
+		"mov %%" _ASM_AX", %%cr2 \n\t"
 		"2: \n\t"
 		/* Check if vmlaunch of vmresume is needed */
 		"cmpl $0, %c[launched](%0) \n\t"
 		/* Load guest registers. Don't clobber flags. */
-		"mov %c[rax](%0), %%"R"ax \n\t"
-		"mov %c[rbx](%0), %%"R"bx \n\t"
-		"mov %c[rdx](%0), %%"R"dx \n\t"
-		"mov %c[rsi](%0), %%"R"si \n\t"
-		"mov %c[rdi](%0), %%"R"di \n\t"
-		"mov %c[rbp](%0), %%"R"bp \n\t"
+		"mov %c[rax](%0), %%" _ASM_AX " \n\t"
+		"mov %c[rbx](%0), %%" _ASM_BX " \n\t"
+		"mov %c[rdx](%0), %%" _ASM_DX " \n\t"
+		"mov %c[rsi](%0), %%" _ASM_SI " \n\t"
+		"mov %c[rdi](%0), %%" _ASM_DI " \n\t"
+		"mov %c[rbp](%0), %%" _ASM_BP " \n\t"
 #ifdef CONFIG_X86_64
 		"mov %c[r8](%0), %%r8 \n\t"
 		"mov %c[r9](%0), %%r9 \n\t"
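The deleted R and Q wrappers are what the asm used to splice 32- vs 64-bit register names; they are replaced here by the shared helpers in arch/x86/include/asm/asm.h. A simplified paraphrase of how those helpers are built (not the verbatim header, which goes through __ASM_FORM indirections):

	#ifdef CONFIG_X86_64
	# define __ASM_SEL(a, b)	" " #b " "	/* directives, e.g. _ASM_PTR */
	# define __ASM_SEL_RAW(a, b)	#b		/* register names, no padding */
	#else
	# define __ASM_SEL(a, b)	" " #a " "
	# define __ASM_SEL_RAW(a, b)	#a
	#endif

	#define __ASM_REG(reg)		__ASM_SEL_RAW(e##reg, r##reg)
	#define _ASM_AX			__ASM_REG(ax)	/* "eax" or "rax" */
	#define _ASM_SP			__ASM_REG(sp)
	#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
	#define _ASM_PTR		__ASM_SEL(.long, .quad)

So "push %%" _ASM_DX pastes together to "push %%rdx" on 64-bit and "push %%edx" on 32-bit, which is exactly what the open-coded R macro produced.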
@@ -6317,24 +6271,24 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %c[r14](%0), %%r14 \n\t"
 		"mov %c[r15](%0), %%r15 \n\t"
 #endif
-		"mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
+		"mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
 
 		/* Enter guest mode */
-		"jne .Llaunched \n\t"
+		"jne 1f \n\t"
 		__ex(ASM_VMX_VMLAUNCH) "\n\t"
-		"jmp .Lkvm_vmx_return \n\t"
-		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
-		".Lkvm_vmx_return: "
+		"jmp 2f \n\t"
+		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+		"2: "
 		/* Save guest registers, load host registers, keep flags */
-		"mov %0, %c[wordsize](%%"R"sp) \n\t"
+		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
-		"mov %%"R"ax, %c[rax](%0) \n\t"
-		"mov %%"R"bx, %c[rbx](%0) \n\t"
-		"pop"Q" %c[rcx](%0) \n\t"
-		"mov %%"R"dx, %c[rdx](%0) \n\t"
-		"mov %%"R"si, %c[rsi](%0) \n\t"
-		"mov %%"R"di, %c[rdi](%0) \n\t"
-		"mov %%"R"bp, %c[rbp](%0) \n\t"
+		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
+		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
+		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
+		"mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
+		"mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
+		"mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
+		"mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
 		"mov %%r8, %c[r8](%0) \n\t"
 		"mov %%r9, %c[r9](%0) \n\t"
@@ -6345,11 +6299,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
 #endif
-		"mov %%cr2, %%"R"ax \n\t"
-		"mov %%"R"ax, %c[cr2](%0) \n\t"
+		"mov %%cr2, %%" _ASM_AX " \n\t"
+		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
 
-		"pop %%"R"bp; pop %%"R"dx \n\t"
+		"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
 		"setbe %c[fail](%0) \n\t"
+		".pushsection .rodata \n\t"
+		".global vmx_return \n\t"
+		"vmx_return: " _ASM_PTR " 2b \n\t"
+		".popsection"
 	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
 		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
 		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
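This is the lto-friendly part: instead of probing a local label with a separate asm() (removed in the vmx_set_constant_host_state hunk above), the VM-exit address is published from inside the asm as a real global symbol holding the address of local label 2, which the extern const ulong vmx_return declaration near the top of the file then reads. A minimal userspace sketch of the same trick (gcc on x86-64; the symbol name mark is invented for the example):

	#include <stdio.h>

	extern const unsigned long mark;	/* emitted by the asm below */

	int main(void)
	{
		/* publish the address of local label 1 as a .rodata symbol */
		asm("1:\n\t"
		    ".pushsection .rodata\n\t"
		    ".global mark\n\t"
		    "mark: .quad 1b\n\t"
		    ".popsection");
		printf("label 1: sits at %#lx\n", mark);
		return 0;
	}

Numeric local labels are file-scoped in gas, so "1b" inside the pushed section still resolves to the label in .text, and no compiler-visible named label is needed, which is what kept gcc's link-time optimizer from choking on the old .Lkvm_vmx_return scheme.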
@@ -6374,12 +6332,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
-		, R"ax", R"bx", R"di", R"si"
 #ifdef CONFIG_X86_64
+		, "rax", "rbx", "rdi", "rsi"
 		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+		, "eax", "ebx", "edi", "esi"
 #endif
 	      );
 
+	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+	if (debugctlmsr)
+		update_debugctlmsr(debugctlmsr);
+
 #ifndef CONFIG_X86_64
 	/*
 	 * The sysexit path does not restore ds/es, so we must set them to
@@ -6424,9 +6388,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_complete_interrupts(vmx);
 }
 
-#undef R
-#undef Q
-
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7281,7 +7242,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.vcpu_load = vmx_vcpu_load,
 	.vcpu_put = vmx_vcpu_put,
 
-	.set_guest_debug = set_guest_debug,
+	.update_db_bp_intercept = update_exception_bitmap,
 	.get_msr = vmx_get_msr,
 	.set_msr = vmx_set_msr,
 	.get_segment_base = vmx_get_segment_base,