Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull first batch of KVM updates from Paolo Bonzini:
 "The bulk of the changes here is for x86.  And for once it's not for
  silicon that no one owns: these are really new features for everyone.

  Details:

   - ARM: several features are in progress but missed the 4.2
     deadline.  So here is just a smattering of bug fixes, plus
     enabling the VFIO integration.

   - s390: Some fixes/refactorings/optimizations, plus support for
     2GB pages.

   - x86:
      * host and guest support for marking kvmclock as a stable
        scheduler clock.
      * support for write combining.
      * support for system management mode, needed for secure boot in
        guests.
      * a bunch of cleanups required for the above
      * support for virtualized performance counters on AMD
      * legacy PCI device assignment is deprecated and defaults to "n"
        in Kconfig; VFIO replaces it

     On top of this there are also bug fixes and eager FPU context
     loading for FPU-heavy guests.

   - Common code: Support for multiple address spaces; for now it is
     used only for x86 SMM but the s390 folks also have plans"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (124 commits)
  KVM: s390: clear floating interrupt bitmap and parameters
  KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
  KVM: x86/vPMU: Implement AMD vPMU code for KVM
  KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
  KVM: x86/vPMU: introduce kvm_pmu_msr_idx_to_pmc
  KVM: x86/vPMU: reorder PMU functions
  KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
  KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
  KVM: x86/vPMU: introduce pmu.h header
  KVM: x86/vPMU: rename a few PMU functions
  KVM: MTRR: do not map huge page for non-consistent range
  KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type
  KVM: MTRR: introduce mtrr_for_each_mem_type
  KVM: MTRR: introduce fixed_mtrr_addr_* functions
  KVM: MTRR: sort variable MTRRs
  KVM: MTRR: introduce var_mtrr_range
  KVM: MTRR: introduce fixed_mtrr_segment table
  KVM: MTRR: improve kvm_mtrr_get_guest_memory_type
  KVM: MTRR: do not split 64 bits MSR content
  KVM: MTRR: clean up mtrr default type
  ...
@@ -184,23 +184,12 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
-/*
- * kvm_mmu_page_role, below, is defined as:
- *
- * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
- * bits 4:7 - page table level for this shadow (1-4)
- * bits 8:9 - page table quadrant for 2-level guests
- * bit   16 - direct mapping of virtual to physical mapping at gfn
- *            used for real mode and two-dimensional paging
- * bits 17:19 - common access permissions for all ptes in this shadow page
- */
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
 		unsigned level:4;
 		unsigned cr4_pae:1;
 		unsigned quadrant:2;
-		unsigned pad_for_nice_hex_output:6;
 		unsigned direct:1;
 		unsigned access:3;
 		unsigned invalid:1;
@@ -208,6 +197,15 @@ union kvm_mmu_page_role {
 		unsigned cr0_wp:1;
 		unsigned smep_andnot_wp:1;
 		unsigned smap_andnot_wp:1;
+		unsigned :8;
+
+		/*
+		 * This is left at the top of the word so that
+		 * kvm_memslots_for_spte_role can extract it with a
+		 * simple shift.  While there is room, give it a whole
+		 * byte so it is also faster to load it from memory.
+		 */
+		unsigned smm:8;
 	};
 };
 
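
A note on the new smm byte: the in-line comment is about bit layout. Because smm occupies the top byte of the 32-bit role word, kvm_memslots_for_spte_role (added further down in this diff) can recover the address-space index from a packed word with a single shift. A standalone sketch of the trick, assuming the little-endian bitfield layout the kernel relies on; names are illustrative, not kernel code:

	#include <assert.h>

	/* Simplified stand-in for kvm_mmu_page_role: smm in the top byte. */
	union page_role {
		unsigned word;
		struct {
			unsigned low:24;	/* all the other role bits */
			unsigned smm:8;		/* address-space index, top byte */
		};
	};

	int main(void)
	{
		union page_role role = { .word = 0 };

		role.smm = 1;
		/* Top-byte placement makes the extract a single shift. */
		assert((role.word >> 24) == 1);
		return 0;
	}
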
@@ -338,12 +336,28 @@ struct kvm_pmu {
 	u64 reprogram_pmi;
 };
 
+struct kvm_pmu_ops;
+
 enum {
 	KVM_DEBUGREG_BP_ENABLED = 1,
 	KVM_DEBUGREG_WONT_EXIT = 2,
 	KVM_DEBUGREG_RELOAD = 4,
 };
 
+struct kvm_mtrr_range {
+	u64 base;
+	u64 mask;
+	struct list_head node;
+};
+
+struct kvm_mtrr {
+	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
+	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
+	u64 deftype;
+
+	struct list_head head;
+};
+
 struct kvm_vcpu_arch {
 	/*
 	 * rip and regs accesses must go through
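
For background on kvm_mtrr_range: a variable-range MTRR covers a physical address when the address agrees with the range base on every bit selected by the mask. A minimal sketch of that predicate, with invented names and the memory-type/valid bits assumed already stripped from the raw MSR values:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct mtrr_range {
		uint64_t base;	/* guest PhysBase (type bits stripped) */
		uint64_t mask;	/* guest PhysMask (valid bit stripped) */
	};

	/* A variable MTRR covers addr when the masked address bits equal
	 * the masked base bits. */
	static bool mtrr_range_matches(const struct mtrr_range *r, uint64_t addr)
	{
		return (addr & r->mask) == (r->base & r->mask);
	}

	int main(void)
	{
		/* 1 MiB range at 2 MiB: mask selects all bits above bit 19. */
		struct mtrr_range r = { .base = 0x200000, .mask = ~0xfffffULL };

		assert(mtrr_range_matches(&r, 0x2abcde));
		assert(!mtrr_range_matches(&r, 0x300000));
		return 0;
	}
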
@@ -368,6 +382,7 @@ struct kvm_vcpu_arch {
 	int32_t apic_arb_prio;
 	int mp_state;
 	u64 ia32_misc_enable_msr;
+	u64 smbase;
 	bool tpr_access_reporting;
 	u64 ia32_xss;
 
@@ -471,8 +486,9 @@ struct kvm_vcpu_arch {
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
 	bool nmi_injected;    /* Trying to inject an NMI this entry */
+	bool smi_pending;    /* SMI queued after currently running handler */
 
-	struct mtrr_state_type mtrr_state;
+	struct kvm_mtrr mtrr_state;
 	u64 pat;
 
 	unsigned switch_db_regs;
@@ -637,6 +653,8 @@ struct kvm_arch {
 #endif
 
 	bool boot_vcpu_runs_old_kvmclock;
+
+	u64 disabled_quirks;
 };
 
 struct kvm_vm_stat {
@@ -689,12 +707,13 @@ struct msr_data {
 
 struct kvm_lapic_irq {
 	u32 vector;
-	u32 delivery_mode;
-	u32 dest_mode;
-	u32 level;
-	u32 trig_mode;
+	u16 delivery_mode;
+	u16 dest_mode;
+	bool level;
+	u16 trig_mode;
 	u32 shorthand;
 	u32 dest_id;
+	bool msi_redir_hint;
 };
 
 struct kvm_x86_ops {
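
On the kvm_lapic_irq changes: the narrower types match what the LAPIC/MSI formats actually encode, and msi_redir_hint is a single bit of the x86 MSI address word (bit 3; destination mode is bit 2), so a bool captures it exactly. A hedged sketch of decoding those fields from an MSI address; illustrative macros, not KVM's decoder:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* x86 MSI address-register fields. */
	#define MSI_ADDR_DEST_ID(a)	(((a) >> 12) & 0xff)
	#define MSI_ADDR_DEST_MODE(a)	(((a) >> 2) & 0x1)	/* 0=physical 1=logical */
	#define MSI_ADDR_REDIR_HINT(a)	(((a) >> 3) & 0x1)

	int main(void)
	{
		uint32_t addr_lo = 0xfee01008;	/* dest 0x01, RH=1, DM=0 */
		bool msi_redir_hint = MSI_ADDR_REDIR_HINT(addr_lo);

		assert(MSI_ADDR_DEST_ID(addr_lo) == 0x01);
		assert(MSI_ADDR_DEST_MODE(addr_lo) == 0);
		assert(msi_redir_hint);
		return 0;
	}
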
@@ -706,19 +725,20 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
+	bool (*cpu_has_high_real_mode_segbase)(void);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
-	void (*vcpu_reset)(struct kvm_vcpu *vcpu);
+	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
 
 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
@@ -836,6 +856,8 @@ struct kvm_x86_ops {
 	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
 					   struct kvm_memory_slot *slot,
 					   gfn_t offset, unsigned long mask);
+	/* pmu operations of sub-arch */
+	const struct kvm_pmu_ops *pmu_ops;
 };
 
 struct kvm_arch_async_pf {
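
The new pmu_ops member is the dispatch table from the vPMU series in the shortlog ("Define kvm_pmu_ops to support vPMU function dispatch"): Intel and AMD each supply an ops structure and common code calls through the pointer. A generic sketch of the pattern; the member names here are placeholders, not the real kvm_pmu_ops layout:

	#include <stdio.h>

	struct pmu_ops {			/* placeholder for kvm_pmu_ops */
		int (*get_msr)(unsigned msr, unsigned long long *data);
		void (*reset)(void);
	};

	static int amd_get_msr(unsigned msr, unsigned long long *data)
	{
		*data = 0;			/* vendor-specific handling here */
		return 0;
	}

	static void amd_reset(void) { }

	static const struct pmu_ops amd_pmu_ops = {
		.get_msr = amd_get_msr,
		.reset   = amd_reset,
	};

	/* Common code dispatches through the table chosen at init time,
	 * mirroring how kvm_x86_ops->pmu_ops is used. */
	static const struct pmu_ops *pmu_ops = &amd_pmu_ops;

	int main(void)
	{
		unsigned long long v;

		pmu_ops->get_msr(0xc0010004, &v);	/* AMD PERFCTR0, for example */
		printf("%llu\n", v);
		return 0;
	}
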
@@ -871,7 +893,7 @@ void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 				      struct kvm_memory_slot *memslot);
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				   struct kvm_memory_slot *memslot);
+				   const struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
@@ -882,7 +904,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   struct kvm_memory_slot *slot,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
@@ -890,7 +912,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
-u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 struct kvm_irq_mask_notifier {
 	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
@@ -938,7 +959,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 struct x86_emulate_ctxt;
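
The kvm_get_msr and get_msr signature changes mirror what set_msr already did: passing struct msr_data bundles the MSR index and value with a host_initiated flag, so a handler can tell a guest RDMSR apart from a host ioctl access. A sketch of why the flag matters; the struct layout is an assumption taken from the 4.2-era definition earlier in this header, and the handler is hypothetical, not the kernel's:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t u32;
	typedef uint64_t u64;

	/* Assumed layout of struct msr_data in this kernel. */
	struct msr_data {
		bool host_initiated;
		u32 index;
		u64 data;
	};

	/* Illustrative handler: some MSRs (e.g. SMBASE, MSR 0x9e) are
	 * reachable only via host-initiated save/restore accesses. */
	static int demo_get_msr(struct msr_data *msr, u64 smbase)
	{
		if (!msr->host_initiated)
			return 1;	/* fault the guest's RDMSR */
		msr->data = smbase;
		return 0;
	}

	int main(void)
	{
		struct msr_data msr = { .host_initiated = true, .index = 0x9e };

		assert(demo_get_msr(&msr, 0x30000) == 0 && msr.data == 0x30000);
		msr.host_initiated = false;
		assert(demo_get_msr(&msr, 0x30000) == 1);
		return 0;
	}
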
@@ -967,7 +988,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
@@ -1110,6 +1131,14 @@ enum {
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
 #define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
+#define HF_SMM_MASK		(1 << 6)
+#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)
+
+#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#define KVM_ADDRESS_SPACE_NUM 2
+
+#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
+#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
 
 /*
  * Hardware virtualization extension instructions may fault if a
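
These macros wire x86 into the new multiple-address-space support: slot set 0 is ordinary memory and set 1 is the SMM view, indexed either by the vCPU's HF_SMM_MASK flag or by the smm byte of the MMU role. A standalone sketch of the selection, with KVM internals replaced by plain arrays:

	#include <stdio.h>

	#define HF_SMM_MASK		(1 << 6)
	#define ADDRESS_SPACE_NUM	2

	struct memslots { const char *name; };

	/* Two parallel slot sets, as with KVM_ADDRESS_SPACE_NUM == 2. */
	static struct memslots slots[ADDRESS_SPACE_NUM] = {
		{ "normal" }, { "smm" },
	};

	/* Mirror of kvm_arch_vcpu_memslots_id: SMM vCPUs see slot set 1. */
	static struct memslots *vcpu_memslots(unsigned hflags)
	{
		return &slots[(hflags & HF_SMM_MASK) ? 1 : 0];
	}

	int main(void)
	{
		printf("%s\n", vcpu_memslots(0)->name);			/* normal */
		printf("%s\n", vcpu_memslots(HF_SMM_MASK)->name);	/* smm */
		return 0;
	}
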
@@ -1144,7 +1173,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
-void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 					   unsigned long address);
@@ -1168,16 +1197,9 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
-void kvm_pmu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_reset(struct kvm_vcpu *vcpu);
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+int __x86_set_memory_region(struct kvm *kvm,
+			    const struct kvm_userspace_memory_region *mem);
+int x86_set_memory_region(struct kvm *kvm,
+			  const struct kvm_userspace_memory_region *mem);
 
 #endif /* _ASM_X86_KVM_HOST_H */