Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull first batch of KVM updates from Paolo Bonzini:
 "The bulk of the changes here is for x86. And for once it's not for
  silicon that no one owns: these are really new features for everyone.

  Details:

   - ARM: several features are in progress but missed the 4.2 deadline.
     So here is just a smattering of bug fixes, plus enabling the VFIO
     integration.

   - s390: Some fixes/refactorings/optimizations, plus support for 2GB
     pages.

   - x86:
      * host and guest support for marking kvmclock as a stable
        scheduler clock.
      * support for write combining.
      * support for system management mode, needed for secure boot in
        guests.
      * a bunch of cleanups required for the above
      * support for virtualized performance counters on AMD
      * legacy PCI device assignment is deprecated and defaults to "n"
        in Kconfig; VFIO replaces it

     On top of this there are also bug fixes and eager FPU context
     loading for FPU-heavy guests.

   - Common code: Support for multiple address spaces; for now it is
     used only for x86 SMM but the s390 folks also have plans"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (124 commits)
  KVM: s390: clear floating interrupt bitmap and parameters
  KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
  KVM: x86/vPMU: Implement AMD vPMU code for KVM
  KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
  KVM: x86/vPMU: introduce kvm_pmu_msr_idx_to_pmc
  KVM: x86/vPMU: reorder PMU functions
  KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
  KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
  KVM: x86/vPMU: introduce pmu.h header
  KVM: x86/vPMU: rename a few PMU functions
  KVM: MTRR: do not map huge page for non-consistent range
  KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type
  KVM: MTRR: introduce mtrr_for_each_mem_type
  KVM: MTRR: introduce fixed_mtrr_addr_* functions
  KVM: MTRR: sort variable MTRRs
  KVM: MTRR: introduce var_mtrr_range
  KVM: MTRR: introduce fixed_mtrr_segment table
  KVM: MTRR: improve kvm_mtrr_get_guest_memory_type
  KVM: MTRR: do not split 64 bits MSR content
  KVM: MTRR: clean up mtrr default type
  ...
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -36,6 +36,10 @@
 #include "kvm-s390.h"
 #include "gaccess.h"

+#define KMSG_COMPONENT "kvm-s390"
+#undef pr_fmt
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 #include "trace-s390.h"
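
The hunk above gives every pr_*() call in this file a common "kvm-s390: "
prefix: defining pr_fmt() before the first pr_*() use makes the preprocessor
rewrite each format string, which is why later hunks in this diff can drop the
hand-written "kvm-s390: " prefixes from individual messages. A minimal sketch
of the expansion (illustrative, not part of the patch):

    #define KMSG_COMPONENT "kvm-s390"
    #undef pr_fmt                        /* override the generic default */
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

    /* pr_warn("failed to commit memory region\n") now expands to
     * printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"),
     * so callers no longer spell out the component name by hand. */
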
@@ -110,7 +114,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
 	0xffe6fffbfcfdfc40UL,
-	0x005c800000000000UL,
+	0x005e800000000000UL,
 };

 unsigned long kvm_s390_fac_list_mask_size(void)
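
The mask words cover the STFLE facility bits, numbered from the most
significant bit of each 64-bit word, so the 0x005c... to 0x005e... change in
word 1 (facilities 64 to 127) turns on exactly one bit, 1UL << (63 - 14),
i.e. facility 78; that is consistent with the new facility-78 (GED2) handling
in a later hunk of this same file. A sketch of the mapping (the helper name
is mine, for illustration):

    /* Test facility number 'nr' against a fac list mask: word nr / 64,
     * bit nr % 64 counted from the word's most significant bit. */
    static inline int fac_list_mask_test(const unsigned long *mask,
                                         unsigned int nr)
    {
            return (mask[nr >> 6] >> (63 - (nr & 63))) & 1;
    }

    /* fac_list_mask_test(kvm_s390_fac_list_mask, 78) flips from 0 to 1
     * with this hunk: 0x005e... ^ 0x005c... == 0x0002000000000000UL. */
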
@@ -236,6 +240,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 {
 	int r;
 	unsigned long n;
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;

@@ -245,7 +250,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_USER_MEM_SLOTS)
 		goto out;

-	memslot = id_to_memslot(kvm->memslots, log->slot);
+	slots = kvm_memslots(kvm);
+	memslot = id_to_memslot(slots, log->slot);
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
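
These two hunks are part of the series-wide move away from dereferencing
kvm->memslots by hand: kvm_memslots() is the accessor that performs the
dereference (under SRCU in the generic code), and with the multiple-address-
space support in this merge it becomes the one place that picks the right
slot array. A sketch of the general access pattern, assuming a caller that
serializes via the SRCU read side (this particular ioctl serializes via
kvm->slots_lock instead):

    int idx = srcu_read_lock(&kvm->srcu);           /* pin the slot array */
    struct kvm_memslots *slots = kvm_memslots(kvm); /* not kvm->memslots  */
    struct kvm_memory_slot *memslot = id_to_memslot(slots, log->slot);
    /* ... read memslot->dirty_bitmap ... */
    srcu_read_unlock(&kvm->srcu, idx);
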
@@ -454,10 +460,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)

 	mutex_lock(&kvm->lock);
 	kvm->arch.epoch = gtod - host_tod;
-	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+	kvm_s390_vcpu_block_all(kvm);
+	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-		exit_sie(cur_vcpu);
-	}
+	kvm_s390_vcpu_unblock_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
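
Rather than kicking each vcpu out of SIE while its epoch is being rewritten,
the TOD update now brackets the loop with block-all/unblock-all so that no
vcpu can re-enter SIE and observe a half-updated epoch. The *_all helpers
themselves are introduced elsewhere in this series; a plausible
reconstruction, as a sketch (not shown in this hunk):

    /* Apply the per-vcpu primitives to every vcpu; callers hold
     * kvm->lock, so the vcpu list is stable. */
    static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
    {
            unsigned int i;
            struct kvm_vcpu *vcpu;

            WARN_ON(!mutex_is_locked(&kvm->lock));
            kvm_for_each_vcpu(i, vcpu, kvm)
                    kvm_s390_vcpu_block(vcpu);   /* sets PROG_BLOCK_SIE, kicks */
    }

    static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
    {
            unsigned int i;
            struct kvm_vcpu *vcpu;

            kvm_for_each_vcpu(i, vcpu, kvm)
                    kvm_s390_vcpu_unblock(vcpu); /* clears PROG_BLOCK_SIE */
    }
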
@@ -1311,8 +1317,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
-						    CPUSTAT_STOPPED |
-						    CPUSTAT_GED);
+						    CPUSTAT_STOPPED);
+
+	if (test_kvm_facility(vcpu->kvm, 78))
+		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+	else if (test_kvm_facility(vcpu->kvm, 8))
+		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
 	kvm_s390_vcpu_setup_model(vcpu);

 	vcpu->arch.sie_block->ecb = 6;
@@ -1409,16 +1420,28 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }

-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	exit_sie(vcpu);
 }

-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }

+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	exit_sie(vcpu);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
 /*
  * Kick a guest cpu out of SIE and wait until SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
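
Besides the kvm_s390_ rename, this hunk splits the single "stay out of SIE"
control into two prog20 bits: PROG_BLOCK_SIE for long-lived blocking
(block/unblock pairs) and PROG_REQUEST for one-shot "leave SIE and handle a
request" kicks. A self-contained userspace model of the protocol, using C11
atomics purely for illustration (function names and flag values are mine):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define PROG_BLOCK_SIE 0x1u  /* long-lived: vcpu explicitly blocked */
    #define PROG_REQUEST   0x2u  /* one-shot: pending request to handle */

    static _Atomic unsigned int prog20;

    static bool may_enter_sie(void)
    {
            /* SIE entry is refused while either bit is set. */
            return (atomic_load(&prog20) & (PROG_BLOCK_SIE | PROG_REQUEST)) == 0;
    }

    static void vcpu_request(void)          /* models kvm_s390_vcpu_request() */
    {
            atomic_fetch_or(&prog20, PROG_REQUEST);
            /* ...followed by a kick out of SIE, as exit_sie() does... */
    }

    static void vcpu_request_handled(void)  /* models ..._request_handled() */
    {
            atomic_fetch_and(&prog20, ~PROG_REQUEST);
    }
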
@@ -1430,11 +1453,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
 		cpu_relax();
 }

-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-	s390_vcpu_block(vcpu);
-	exit_sie(vcpu);
+	kvm_make_request(req, vcpu);
+	kvm_s390_vcpu_request(vcpu);
 }

 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
@@ -1447,8 +1470,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
 		/* match against both prefix pages */
 		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-			exit_sie_sync(vcpu);
+			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 		}
 	}
 }
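
exit_sie_sync() blocked the vcpu as a side effect and required the caller to
queue its request separately; kvm_s390_sync_request() folds the two steps
together, publishing the request bit before raising PROG_REQUEST and kicking,
as seen in the simplified gmap notifier above. The request bits use KVM's
usual test-and-clear semantics; a compact model (illustrative, not kernel
code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned long requests;

    static void make_request(unsigned int req)   /* like kvm_make_request() */
    {
            atomic_fetch_or(&requests, 1UL << req);
    }

    static bool check_request(unsigned int req)  /* like kvm_check_request() */
    {
            /* test-and-clear: only the caller that consumes the bit sees true */
            return atomic_fetch_and(&requests, ~(1UL << req)) & (1UL << req);
    }
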
@@ -1720,8 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)

 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu->requests)
+		return 0;
 retry:
-	s390_vcpu_unblock(vcpu);
+	kvm_s390_vcpu_request_handled(vcpu);
 	/*
 	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
 	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
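
The new early return is safe because of the ordering inside
kvm_s390_sync_request(): the request bit is published before PROG_REQUEST is
set and the vcpu is kicked, so a vcpu that observes vcpu->requests == 0
cannot owe an acknowledgement for a request it has not yet been shown.
Sketched with the helpers from the models above (REQ_MMU_RELOAD is an
illustrative request number):

    #define REQ_MMU_RELOAD 0

    static void requester(void)
    {
            make_request(REQ_MMU_RELOAD);  /* (1) publish the request bit      */
            vcpu_request();                /* (2) PROG_REQUEST + kick from SIE */
    }

    static int handle_requests(void)
    {
            if (atomic_load(&requests) == 0)
                    return 0;              /* (1) not seen => (2) did not happen */
            vcpu_request_handled();        /* clear PROG_REQUEST first...        */
            if (check_request(REQ_MMU_RELOAD)) {
                    /* ...then consume; a request raised after the clear
                     * re-kicks the vcpu for another pass */
            }
            return 0;
    }
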
@@ -1993,12 +2017,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		 * As PF_VCPU will be used in fault handler, between
 		 * guest_enter and guest_exit should be no uaccess.
 		 */
-		preempt_disable();
-		kvm_guest_enter();
-		preempt_enable();
+		local_irq_disable();
+		__kvm_guest_enter();
+		local_irq_enable();
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
-		kvm_guest_exit();
+		local_irq_disable();
+		__kvm_guest_exit();
+		local_irq_enable();
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

 		rc = vcpu_post_run(vcpu, exit_reason);
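
kvm_guest_enter()/kvm_guest_exit() are the irq-safe flavors and save/restore
interrupt flags internally; since this path can simply disable interrupts
around the call, it switches to the double-underscore variants and pays for
one cheap irq toggle instead of a nested save/restore. A sketch of the
relationship, reconstructed from the generic header of that era (simplified;
the real wrapper also does RCU and vtime bookkeeping):

    /* __kvm_guest_enter() must be called with interrupts disabled;
     * kvm_guest_enter() is merely the irq-safe convenience wrapper. */
    static inline void kvm_guest_enter(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            __kvm_guest_enter();
            local_irq_restore(flags);
    }
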
@@ -2068,7 +2094,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
 		kvm_s390_vcpu_start(vcpu);
 	} else if (is_vcpu_stopped(vcpu)) {
-		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
+		pr_err_ratelimited("can't run stopped vcpu %d\n",
 				   vcpu->vcpu_id);
 		return -EINVAL;
 	}
@@ -2206,8 +2232,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }

 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2223,8 +2248,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-	exit_sie_sync(vcpu);
+	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }

 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
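
Note the kvm_check_request() that opens each helper: since check is
test-and-clear, a still-pending request for the opposite IBS state is
cancelled before the new one is queued, leaving at most one IBS request in
flight per vcpu. In the model used above (request numbers are illustrative):

    #define REQ_ENABLE_IBS  1
    #define REQ_DISABLE_IBS 2

    static void disable_ibs(void)
    {
            check_request(REQ_ENABLE_IBS);  /* cancel a stale opposite request */
            make_request(REQ_DISABLE_IBS);  /* publish the new target state    */
            vcpu_request();                 /* PROG_REQUEST + kick out of SIE  */
    }
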
@@ -2563,7 +2587,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 /* Section: memory related */
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_userspace_memory_region *mem,
+				   const struct kvm_userspace_memory_region *mem,
 				   enum kvm_mr_change change)
 {
 	/* A few sanity checks. We can have memory slots which have to be
@@ -2581,8 +2605,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 }

 void kvm_arch_commit_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
+				const struct kvm_userspace_memory_region *mem,
 				const struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
 	int rc;
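
These signature changes come from the common-code side of the merge: the
userspace region descriptor is now passed down const, so arch code can only
read it, and the commit hook receives both the old and the new slot. The
const turns accidental writes into compile errors; an illustrative
kernel-context sketch (the helper name is mine):

    /* Reads through the const pointer remain fine... */
    static u64 region_end(const struct kvm_userspace_memory_region *mem)
    {
            return mem->guest_phys_addr + mem->memory_size;
    }
    /* ...while something like `mem->flags = 0;` in arch code now fails
     * to compile ("assignment of member in read-only object"). */
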
@@ -2601,7 +2626,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
 		mem->guest_phys_addr, mem->memory_size);
 	if (rc)
-		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+		pr_warn("failed to commit memory region\n");
 	return;
 }