Merge tag 'kvm-4.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Radim Krčmář:
 "All architectures:
   - move `make kvmconfig` stubs from x86
   - use 64 bits for debugfs stats

  ARM:
   - important fixes for configurations that do not use an in-kernel irqchip
   - handle SError exceptions and present them to guests if appropriate
   - proxying of GICV access at EL2 if guest mappings are unsafe
   - GICv3 on AArch32 on ARMv8
   - preparations for GICv3 save/restore, including ABI docs
   - cleanups and a bit of optimization

  MIPS:
   - A couple of fixes in preparation for supporting MIPS EVA host
     kernels
   - MIPS SMP host & TLB invalidation fixes

  PPC:
   - Fix the bug which caused guests to falsely report lockups
   - other minor fixes
   - a small optimization

  s390:
   - Lazy enablement of runtime instrumentation
   - up to 255 CPUs for nested guests
   - rework of machine check delivery
   - cleanups and fixes

  x86:
   - IOMMU part of AMD's AVIC for vmexit-less interrupt delivery
   - Hyper-V TSC page
   - per-vcpu tsc_offset in debugfs
   - accelerated INS/OUTS in nVMX
   - cleanups and fixes"

* tag 'kvm-4.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (140 commits)
  KVM: MIPS: Drop dubious EntryHi optimisation
  KVM: MIPS: Invalidate TLB by regenerating ASIDs
  KVM: MIPS: Split kernel/user ASID regeneration
  KVM: MIPS: Drop other CPU ASIDs on guest MMU changes
  KVM: arm/arm64: vgic: Don't flush/sync without a working vgic
  KVM: arm64: Require in-kernel irqchip for PMU support
  KVM: PPC: Book3s PR: Allow access to unprivileged MMCR2 register
  KVM: PPC: Book3S PR: Support 64kB page size on POWER8E and POWER8NVL
  KVM: PPC: Book3S: Remove duplicate setting of the B field in tlbie
  KVM: PPC: BookE: Fix a sanity check
  KVM: PPC: Book3S HV: Take out virtual core piggybacking code
  KVM: PPC: Book3S: Treat VTB as a per-subcore register, not per-thread
  ARM: gic-v3: Work around definition of gic_write_bpr1
  KVM: nVMX: Fix the NMI IDT-vectoring handling
  KVM: VMX: Enable MSR-BASED TPR shadow even if APICv is inactive
  KVM: nVMX: Fix reload apic access page warning
  kvmconfig: add virtio-gpu to config fragment
  config: move x86 kvm_guest.config to a common location
  arm64: KVM: Remove duplicating init code for setting VMID
  ARM: KVM: Support vgic-v3
  ...
Linus Torvalds
2016-10-06 10:49:01 -07:00
127 changed files with 4541 additions and 1636 deletions


@@ -495,6 +495,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
switch (code) {
case PGM_PROTECTION:
switch (prot) {
case PROT_TYPE_ALC:
tec->b60 = 1;
/* FALL THROUGH */
case PROT_TYPE_DAT:
tec->b61 = 1;
break;
default: /* LA and KEYC set b61 to 0, other params undefined */
return code;
}
/* FALL THROUGH */
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
@@ -504,8 +516,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
/*
* op_access_id only applies to MOVE_PAGE -> set bit 61
* exc_access_id has to be set to 0 for some instructions. Both
* cases have to be handled by the caller. We can always store
* exc_access_id, as it is undefined for non-ar cases.
* cases have to be handled by the caller.
*/
tec->addr = gva >> PAGE_SHIFT;
tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
@@ -516,25 +527,13 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
case PGM_ASTE_VALIDITY:
case PGM_ASTE_SEQUENCE:
case PGM_EXTENDED_AUTHORITY:
/*
* We can always store exc_access_id, as it is
* undefined for non-ar cases. It is undefined for
* most DAT protection exceptions.
*/
pgm->exc_access_id = ar;
break;
case PGM_PROTECTION:
switch (prot) {
case PROT_TYPE_ALC:
tec->b60 = 1;
/* FALL THROUGH */
case PROT_TYPE_DAT:
tec->b61 = 1;
tec->addr = gva >> PAGE_SHIFT;
tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
/* exc_access_id is undefined for most cases */
pgm->exc_access_id = ar;
break;
default: /* LA and KEYC set b61 to 0, other params undefined */
break;
}
break;
}
return code;
}

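The reworked trans_exc() above folds all protection-exception handling into one nested switch that relies on deliberate fall-through: PROT_TYPE_ALC sets bit 60 and then falls into the PROT_TYPE_DAT case so bit 61 is set as well, while LA and KEYC protection leave bit 61 at 0. A minimal stand-alone sketch of that pattern (the enum, struct and bit names below are placeholders for illustration, not the kernel's definitions):

#include <stdio.h>

enum prot_type { PROT_TYPE_LA, PROT_TYPE_KEYC, PROT_TYPE_ALC, PROT_TYPE_DAT };

struct tec_bits {
    int b60;
    int b61;
};

static void set_prot_bits(enum prot_type prot, struct tec_bits *tec)
{
    switch (prot) {
    case PROT_TYPE_ALC:
        tec->b60 = 1;
        /* FALL THROUGH: ALC protection also sets bit 61 */
    case PROT_TYPE_DAT:
        tec->b61 = 1;
        break;
    default:
        /* LA and KEYC leave bit 61 at 0 */
        break;
    }
}

int main(void)
{
    struct tec_bits tec = { 0, 0 };

    set_prot_bits(PROT_TYPE_ALC, &tec);
    printf("b60=%d b61=%d\n", tec.b60, tec.b61);    /* prints b60=1 b61=1 */
    return 0;
}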

@@ -206,7 +206,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
int ret = 0, nr_wp = 0, nr_bp = 0, i;
struct kvm_hw_breakpoint *bp_data = NULL;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -216,17 +216,10 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
return -EINVAL;
size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
bp_data = kmalloc(size, GFP_KERNEL);
if (!bp_data) {
ret = -ENOMEM;
goto error;
}
if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
ret = -EFAULT;
goto error;
}
bp_data = memdup_user(dbg->arch.hw_bp,
sizeof(*bp_data) * dbg->arch.nr_hw_bp);
if (IS_ERR(bp_data))
return PTR_ERR(bp_data);
for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
switch (bp_data[i].type) {
@@ -241,17 +234,19 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
}
}
size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
if (size > 0) {
wp_info = kmalloc(size, GFP_KERNEL);
if (nr_wp > 0) {
wp_info = kmalloc_array(nr_wp,
sizeof(*wp_info),
GFP_KERNEL);
if (!wp_info) {
ret = -ENOMEM;
goto error;
}
}
size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
if (size > 0) {
bp_info = kmalloc(size, GFP_KERNEL);
if (nr_bp > 0) {
bp_info = kmalloc_array(nr_bp,
sizeof(*bp_info),
GFP_KERNEL);
if (!bp_info) {
ret = -ENOMEM;
goto error;
@@ -382,14 +377,20 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
#define PER_CODE_MASK (PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH (PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH (PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE (PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL (PER_EVENT_STORE_REAL >> 24)
#define per_bp_event(code) \
(code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
(code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
static int debug_exit_required(struct kvm_vcpu *vcpu)
{
u32 perc = (vcpu->arch.sie_block->perc << 24);
u8 perc = vcpu->arch.sie_block->perc;
struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -444,7 +445,7 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
const u8 ilen = kvm_s390_get_ilen(vcpu);
struct kvm_s390_pgm_info pgm_info = {
.code = PGM_PER,
.per_code = PER_EVENT_IFETCH >> 24,
.per_code = PER_CODE_IFETCH,
.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
};
@@ -458,33 +459,33 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
u32 perc = vcpu->arch.sie_block->perc << 24;
const u8 perc = vcpu->arch.sie_block->perc;
u64 peraddr = vcpu->arch.sie_block->peraddr;
u64 addr = vcpu->arch.sie_block->gpsw.addr;
u64 cr9 = vcpu->arch.sie_block->gcr[9];
u64 cr10 = vcpu->arch.sie_block->gcr[10];
u64 cr11 = vcpu->arch.sie_block->gcr[11];
/* filter all events, demanded by the guest */
u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
if (!guest_per_enabled(vcpu))
guest_perc = 0;
/* filter "successful-branching" events */
if (guest_perc & PER_EVENT_BRANCH &&
if (guest_perc & PER_CODE_BRANCH &&
cr9 & PER_CONTROL_BRANCH_ADDRESS &&
!in_addr_range(addr, cr10, cr11))
guest_perc &= ~PER_EVENT_BRANCH;
guest_perc &= ~PER_CODE_BRANCH;
/* filter "instruction-fetching" events */
if (guest_perc & PER_EVENT_IFETCH &&
if (guest_perc & PER_CODE_IFETCH &&
!in_addr_range(peraddr, cr10, cr11))
guest_perc &= ~PER_EVENT_IFETCH;
guest_perc &= ~PER_CODE_IFETCH;
/* All other PER events will be given to the guest */
/* TODO: Check altered address/address space */
vcpu->arch.sie_block->perc = guest_perc >> 24;
vcpu->arch.sie_block->perc = guest_perc;
if (!guest_perc)
vcpu->arch.sie_block->iprcc &= ~PGM_PER;

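The kvm_s390_import_bp_data() changes above swap the open-coded kmalloc() + copy_from_user() pair for memdup_user(), which allocates and copies in one step and reports failure via ERR_PTR, and switch the later allocations to kmalloc_array(), which guards the count-times-size multiplication against overflow. A hedged sketch of both idioms, with an invented struct and function names and assuming a kernel-module context (this is not the KVM code itself):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/err.h>

struct demo_bp {            /* invented example payload */
    u64 addr;
    u32 type;
};

/* nr is assumed to have been range-checked by the caller */
static struct demo_bp *demo_import_bps(const void __user *uptr, u32 nr)
{
    struct demo_bp *bps;

    /* allocate + copy_from_user in one step; returns ERR_PTR on failure */
    bps = memdup_user(uptr, nr * sizeof(*bps));
    if (IS_ERR(bps))
        return bps;         /* caller checks with IS_ERR()/PTR_ERR() */

    return bps;
}

static struct demo_bp *demo_alloc_bps(u32 nr)
{
    /* kmalloc_array() checks nr * sizeof(*bps) for multiplication overflow */
    return kmalloc_array(nr, sizeof(struct demo_bp), GFP_KERNEL);
}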

@@ -29,6 +29,7 @@ static const intercept_handler_t instruction_handlers[256] = {
[0x01] = kvm_s390_handle_01,
[0x82] = kvm_s390_handle_lpsw,
[0x83] = kvm_s390_handle_diag,
[0xaa] = kvm_s390_handle_aa,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb6] = kvm_s390_handle_stctl,

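The one-line hunk above wires opcode 0xaa into the instruction_handlers table so kvm_s390_handle_aa() is invoked for those instructions; the first opcode byte simply indexes a 256-entry array of handler pointers, and a NULL slot means the instruction is not handled there. A stand-alone sketch of this table-dispatch pattern (names and return values are invented for illustration):

#include <stdio.h>

typedef int (*handler_t)(const unsigned char *insn);

static int handle_aa(const unsigned char *insn)
{
    (void)insn;
    return 0;                       /* handled */
}

static const handler_t handlers[256] = {
    [0xaa] = handle_aa,             /* newly wired-up opcode byte */
};

static int dispatch(const unsigned char *insn)
{
    handler_t h = handlers[insn[0]];

    return h ? h(insn) : -1;        /* -1: no handler, fall back */
}

int main(void)
{
    const unsigned char insn[2] = { 0xaa, 0x00 };

    printf("dispatch returned %d\n", dispatch(insn));   /* 0 */
    return 0;
}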

@@ -24,6 +24,8 @@
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -40,6 +42,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
return 0;
BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -68,6 +71,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
int expect, rc;
BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -109,6 +113,8 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc, expect;
if (!kvm_s390_use_sca_entries())
return;
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
@@ -400,12 +406,78 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
static int __write_machine_check(struct kvm_vcpu *vcpu,
struct kvm_s390_mchk_info *mchk)
{
unsigned long ext_sa_addr;
freg_t fprs[NUM_FPRS];
union mci mci;
int rc;
mci.val = mchk->mcic;
/* take care of lazy register loading via vcpu load/put */
save_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
/* Extended save area */
rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
sizeof(unsigned long));
/* Only bits 0-53 are used for address formation */
ext_sa_addr &= ~0x3ffUL;
if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
512))
mci.vr = 0;
} else {
mci.vr = 0;
}
/* General interruption information */
rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
/* Register-save areas */
if (MACHINE_HAS_VX) {
convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
} else {
rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
vcpu->run->s.regs.fprs, 128);
}
rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
(u32 __user *) __LC_FP_CREG_SAVE_AREA);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
(u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
(u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
(u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
&vcpu->run->s.regs.acrs, 64);
rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
&vcpu->arch.sie_block->gcr, 128);
/* Extended interruption information */
rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
(u32 __user *) __LC_EXT_DAMAGE_CODE);
rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
(u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
sizeof(mchk->fixed_logout));
return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_mchk_info mchk = {};
unsigned long adtl_status_addr;
int deliver = 0;
int rc = 0;
@@ -446,29 +518,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_MCHK,
mchk.cr14, mchk.mcic);
rc = kvm_s390_vcpu_store_status(vcpu,
KVM_S390_STORE_STATUS_PREFIXED);
rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
&adtl_status_addr,
sizeof(unsigned long));
rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
adtl_status_addr);
rc |= put_guest_lc(vcpu, mchk.mcic,
(u64 __user *) __LC_MCCK_CODE);
rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
(u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
&mchk.fixed_logout,
sizeof(mchk.fixed_logout));
rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
&vcpu->arch.sie_block->gpsw,
sizeof(psw_t));
rc = __write_machine_check(vcpu, &mchk);
}
return rc ? -EFAULT : 0;
return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)

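In the new __write_machine_check() above, the comment "Only bits 0-53 are used for address formation" uses s390's left-to-right bit numbering: bits 0-53 of a 64-bit value are everything except the ten low-order bits, so ext_sa_addr &= ~0x3ffUL simply forces 1 KiB alignment of the extended save-area address. A tiny stand-alone illustration:

#include <stdio.h>

int main(void)
{
    unsigned long ext_sa_addr = 0x12345678abcUL;

    ext_sa_addr &= ~0x3ffUL;        /* keep bits 0-53, clear bits 54-63 */
    printf("%#lx\n", ext_sa_addr);  /* prints 0x12345678800 */
    return 0;
}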

@@ -384,7 +384,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_S390_BSCA_CPU_SLOTS;
if (sclp.has_esca && sclp.has_64bscao)
if (!kvm_s390_use_sca_entries())
r = KVM_MAX_VCPUS;
else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
break;
case KVM_CAP_NR_MEMSLOTS:
@@ -1498,6 +1500,16 @@ out_err:
return rc;
}
bool kvm_arch_has_vcpu_debugfs(void)
{
return false;
}
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
return 0;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
@@ -1561,6 +1573,8 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
if (!kvm_s390_use_sca_entries())
return;
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1578,6 +1592,13 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
if (!kvm_s390_use_sca_entries()) {
struct bsca_block *sca = vcpu->kvm->arch.sca;
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1658,6 +1679,11 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
int rc;
if (!kvm_s390_use_sca_entries()) {
if (id < KVM_MAX_VCPUS)
return true;
return false;
}
if (id < KVM_S390_BSCA_CPU_SLOTS)
return true;
if (!sclp.has_esca || !sclp.has_64bscao)
@@ -1946,8 +1972,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp.has_sigpif)
vcpu->arch.sie_block->eca |= 0x10000000U;
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->arch.sie_block->ecb3 |= 0x01;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= 0x00020000;
vcpu->arch.sie_block->ecd |= 0x20000000;
@@ -2704,6 +2728,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
/*
* If userspace sets the riccb (e.g. after migration) to a valid state,
* we should enable RI here instead of doing the lazy enablement.
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
test_kvm_facility(vcpu->kvm, 64)) {
struct runtime_instr_cb *riccb =
(struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
if (riccb->valid)
vcpu->arch.sie_block->ecb3 |= 0x01;
}
kvm_run->kvm_dirty_regs = 0;
}
@@ -2847,38 +2884,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
/*
* store additional status at address
*/
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
unsigned long gpa)
{
/* Only bits 0-53 are used for address formation */
if (!(gpa & ~0x3ff))
return 0;
return write_guest_abs(vcpu, gpa & ~0x3ff,
(void *)&vcpu->run->s.regs.vrs, 512);
}
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!test_kvm_facility(vcpu->kvm, 129))
return 0;
/*
* The guest VXRS are in the host VXRs due to the lazy
* copying in vcpu load/put. We can simply call save_fpu_regs()
* to save the current register state because we are in the
* middle of a load/put cycle.
*
* Let's update our copies before we save it into the save area.
*/
save_fpu_regs();
return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);


@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
@@ -245,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
@@ -273,10 +275,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu);
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -389,4 +388,13 @@ static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
/*
* Without SIGP interpretation, only SRS interpretation (if available)
* might use the entries. By not setting the entries and keeping them
* invalid, hardware will not access them but intercept.
*/
return sclp.has_sigpif;
}
#endif

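The new kvm_s390_use_sca_entries() helper above, together with the KVM_CAP_MAX_VCPUS change earlier in this series, means the SCA size only caps the vCPU count when SIGP interpretation is available. A rough stand-alone sketch of that decision; the slot counts below are illustrative constants rather than guaranteed kernel values:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_BSCA_CPU_SLOTS 64      /* basic SCA */
#define DEMO_ESCA_CPU_SLOTS 248     /* extended SCA */
#define DEMO_MAX_VCPUS      255     /* no SCA-imposed limit */

static int demo_max_vcpus(bool has_sigpif, bool has_esca, bool has_64bscao)
{
    if (!has_sigpif)                /* SCA entries unused: no SCA limit */
        return DEMO_MAX_VCPUS;
    if (has_esca && has_64bscao)    /* extended SCA available */
        return DEMO_ESCA_CPU_SLOTS;
    return DEMO_BSCA_CPU_SLOTS;     /* basic SCA only */
}

int main(void)
{
    printf("%d %d %d\n",
           demo_max_vcpus(false, false, false),  /* 255 */
           demo_max_vcpus(true, true, true),     /* 248 */
           demo_max_vcpus(true, false, true));   /* 64 */
    return 0;
}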

@@ -32,6 +32,24 @@
#include "kvm-s390.h"
#include "trace.h"
static int handle_ri(struct kvm_vcpu *vcpu)
{
if (test_kvm_facility(vcpu->kvm, 64)) {
vcpu->arch.sie_block->ecb3 |= 0x01;
kvm_s390_retry_instr(vcpu);
return 0;
} else
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
return handle_ri(vcpu);
else
return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
@@ -1093,6 +1111,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
static const intercept_handler_t eb_handlers[256] = {
[0x2f] = handle_lctlg,
[0x25] = handle_stctg,
[0x60] = handle_ri,
[0x61] = handle_ri,
[0x62] = handle_ri,
};
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
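
handle_ri() above implements the "lazy enablement of runtime instrumentation" mentioned in the pull summary: RI instructions are not interpreted until the guest first uses one, at which point the intercept handler sets the ecb3 bit and retries the same instruction, while sync_regs() (earlier in this series) enables it eagerly when userspace restores a valid riccb, e.g. after migration. A rough stand-alone sketch of the lazy-enable-and-retry pattern, with invented types and names:

#include <stdbool.h>
#include <stdio.h>

struct demo_vcpu {
    bool has_ri_facility;   /* facility 64 available to this guest */
    bool ri_enabled;        /* hardware interpretation turned on (ecb3 bit) */
};

/* called on the first intercepted RI instruction */
static int demo_handle_ri(struct demo_vcpu *vcpu)
{
    if (!vcpu->has_ri_facility)
        return -1;              /* inject an operation exception instead */

    vcpu->ri_enabled = true;    /* enable interpretation ... */
    return 0;                   /* ... and retry the same instruction */
}

int main(void)
{
    struct demo_vcpu vcpu = { .has_ri_facility = true };

    if (demo_handle_ri(&vcpu) == 0 && vcpu.ri_enabled)
        printf("RI enabled lazily on first use\n");
    return 0;
}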