Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "s390:
   - ioctl hardening
   - selftests

  ARM:
   - ITS translation cache
   - support for 512 vCPUs
   - various cleanups and bugfixes

  PPC:
   - various minor fixes and preparation

  x86:
   - bugfixes all over the place (posted interrupts, SVM, emulation corner cases, blocked INIT)
   - some IPI optimizations"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (75 commits)
  KVM: X86: Use IPI shorthands in kvm guest when support
  KVM: x86: Fix INIT signal handling in various CPU states
  KVM: VMX: Introduce exit reason for receiving INIT signal on guest-mode
  KVM: VMX: Stop the preemption timer during vCPU reset
  KVM: LAPIC: Micro optimize IPI latency
  kvm: Nested KVM MMUs need PAE root too
  KVM: x86: set ctxt->have_exception in x86_decode_insn()
  KVM: x86: always stop emulation on page fault
  KVM: nVMX: trace nested VM-Enter failures detected by H/W
  KVM: nVMX: add tracepoint for failed nested VM-Enter
  x86: KVM: svm: Fix a check in nested_svm_vmrun()
  KVM: x86: Return to userspace with internal error on unexpected exit reason
  KVM: x86: Add kvm_emulate_{rd,wr}msr() to consolidate VXM/SVM code
  KVM: x86: Refactor up kvm_{g,s}et_msr() to simplify callers
  doc: kvm: Fix return description of KVM_SET_MSRS
  KVM: X86: Tune PLE Window tracepoint
  KVM: VMX: Change ple_window type to unsigned int
  KVM: X86: Remove tailing newline for tracepoints
  KVM: X86: Trace vcpu_id for vmexit
  KVM: x86: Manually calculate reserved bits when loading PDPTRS
  ...
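As a quick illustration of the "Refactor up kvm_{g,s}et_msr()" and kvm_emulate_{rd,wr}msr() entries listed above, here is a minimal caller-side sketch; the wrapper names are hypothetical and not part of this series, while the kvm_get_msr() and kvm_emulate_rdmsr() prototypes are the ones declared in the hunks further down:

/*
 * Minimal sketch, not code from this merge: the wrapper names below are
 * hypothetical.  kvm_get_msr(vcpu, index, &data) and kvm_emulate_rdmsr()
 * are the prototypes added in the header hunks below.
 */
static int my_read_efer(struct kvm_vcpu *vcpu, u64 *efer)
{
        /* Callers now pass an MSR index and a plain u64, not struct msr_data. */
        return kvm_get_msr(vcpu, MSR_EFER, efer);
}

static int my_vendor_rdmsr_exit(struct kvm_vcpu *vcpu)
{
        /* RDMSR exit handling is now shared between the VMX and SVM code. */
        return kvm_emulate_rdmsr(vcpu);
}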
@@ -718,7 +718,7 @@ struct kvm_vcpu_arch {
 
         /* Cache MMIO info */
         u64 mmio_gva;
-        unsigned access;
+        unsigned mmio_access;
         gfn_t mmio_gfn;
         u64 mmio_gen;
 
@@ -1072,7 +1072,7 @@ struct kvm_x86_ops {
 
         void (*run)(struct kvm_vcpu *vcpu);
         int (*handle_exit)(struct kvm_vcpu *vcpu);
-        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
         void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
         u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
         void (*patch_hypercall)(struct kvm_vcpu *vcpu,
@@ -1211,6 +1211,8 @@ struct kvm_x86_ops {
         uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
 
         bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
+
+        bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1328,8 +1330,10 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
-int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
+int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
 
 struct x86_emulate_ctxt;
 
@@ -1583,6 +1587,13 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
 void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                      struct kvm_lapic_irq *irq);
 
+static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
+{
+        /* We can only post Fixed and LowPrio IRQs */
+        return (irq->delivery_mode == dest_Fixed ||
+                irq->delivery_mode == dest_LowestPrio);
+}
+
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
         if (kvm_x86_ops->vcpu_blocking)
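The kvm_irq_is_postable() helper added in the last hunk gives posted-interrupt code a single check for "can this IRQ be posted?". A hedged usage sketch follows; the caller name is hypothetical, and only kvm_irq_is_postable() and struct kvm_lapic_irq come from the code above:

/*
 * Illustrative sketch only; the function name is hypothetical.
 * Posted interrupts can carry only Fixed and LowestPrio delivery
 * modes, so anything else (NMI, SMI, INIT, ...) must fall back to
 * ordinary event injection.
 */
static bool my_try_posted_delivery(struct kvm_lapic_irq *irq)
{
        if (!kvm_irq_is_postable(irq))
                return false;   /* fall back to normal injection */
        return true;            /* hand off to the posted-interrupt path */
}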