Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "Small update for KVM:

  ARM:
   - lazy context-switching of FPSIMD registers on arm64
   - "split" regions for vGIC redistributor

  s390:
   - cleanups for nested
   - clock handling
   - crypto
   - storage keys
   - control register bits

  x86:
   - many bugfixes
   - implement more Hyper-V super powers
   - implement lapic_timer_advance_ns even when the LAPIC timer is
     emulated using the processor's VMX preemption timer.
   - two security-related bugfixes at the top of the branch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (79 commits)
  kvm: fix typo in flag name
  kvm: x86: use correct privilege level for sgdt/sidt/fxsave/fxrstor access
  KVM: x86: pass kvm_vcpu to kvm_read_guest_virt and kvm_write_guest_virt_system
  KVM: x86: introduce linear_{read,write}_system
  kvm: nVMX: Enforce cpl=0 for VMX instructions
  kvm: nVMX: Add support for "VMWRITE to any supported field"
  kvm: nVMX: Restrict VMX capability MSR changes
  KVM: VMX: Optimize tscdeadline timer latency
  KVM: docs: nVMX: Remove known limitations as they do not exist now
  KVM: docs: mmu: KVM support exposing SLAT to guests
  kvm: no need to check return value of debugfs_create functions
  kvm: Make VM ioctl do valloc for some archs
  kvm: Change return type to vm_fault_t
  KVM: docs: mmu: Fix link to NPT presentation from KVM Forum 2008
  kvm: x86: Amend the KVM_GET_SUPPORTED_CPUID API documentation
  KVM: x86: hyperv: declare KVM_CAP_HYPERV_TLBFLUSH capability
  KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}_EX implementation
  KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE} implementation
  KVM: introduce kvm_make_vcpus_request_mask() API
  KVM: x86: hyperv: do rep check for each hypercall separately
  ...
@@ -153,7 +153,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
 
 	if (guestdbg_sstep_enabled(vcpu)) {
 		/* disable timer (clock-comparator) interrupts */
-		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
+		vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
 		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
 		vcpu->arch.sie_block->gcr[10] = 0;
 		vcpu->arch.sie_block->gcr[11] = -1UL;
@@ -159,7 +159,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 {
 	if (psw_extint_disabled(vcpu) ||
-	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
 		return 0;
 	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
 		/* No timer interrupts when single stepping */
@@ -172,7 +172,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 	const u64 ckc = vcpu->arch.sie_block->ckc;
 
-	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
 		if ((s64)ckc >= (s64)now)
 			return 0;
 	} else if (ckc >= now) {
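
The sign-control check in this hunk matters once the TOD clock passes the unsigned midpoint: with the sign bit set, clock-comparator values are compared as signed quantities so a comparator "near zero" still counts as being in the future. A standalone illustration, not part of the diff:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* TOD clock value past the unsigned midpoint, comparator near zero */
        uint64_t now = 0x8000000000000001ULL;
        uint64_t ckc = 0x0000000000000010ULL;

        /* unsigned view: ckc is long past, so no interrupt is pending */
        printf("unsigned: ckc >= now -> %d\n", ckc >= now);            /* 0 */
        /* signed view (CR0_CLOCK_COMPARATOR_SIGN): ckc is in the future */
        printf("signed:   ckc >= now -> %d\n",
               (int64_t)ckc >= (int64_t)now);                          /* 1 */
        return 0;
    }
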
@@ -184,7 +184,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
 {
 	return !psw_extint_disabled(vcpu) &&
-	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
+	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
 }
 
 static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
@@ -285,15 +285,15 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 		active_mask &= ~IRQ_PEND_IO_MASK;
 	else
 		active_mask = disable_iscs(vcpu, active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
 		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
 	if (psw_mchk_disabled(vcpu))
 		active_mask &= ~IRQ_PEND_MCHK_MASK;
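
All of the magic numbers replaced in these hunks follow the s390 convention of numbering control-register bits 0 (most significant) through 63 (least significant). A sketch of the assumed bit definitions (the real header's macro spelling may differ), reproducing each removed constant exactly:

    /*
     * Sketch only: CR0 bit definitions matching the magic numbers removed
     * above.  Architectural bit N (0 = MSB) is host value 1UL << (63 - N).
     */
    #define CR0_BIT(n)                    (1UL << (63 - (n)))

    #define CR0_CLOCK_COMPARATOR_SIGN     CR0_BIT(10) /* 0x0020000000000000ul */
    #define CR0_EMERGENCY_SIGNAL_SUBMASK  CR0_BIT(49) /* 0x4000ul */
    #define CR0_EXTERNAL_CALL_SUBMASK     CR0_BIT(50) /* 0x2000ul */
    #define CR0_CLOCK_COMPARATOR_SUBMASK  CR0_BIT(52) /* 0x800ul */
    #define CR0_CPU_TIMER_SUBMASK         CR0_BIT(53) /* 0x400ul */
    #define CR0_SERVICE_SIGNAL_SUBMASK    CR0_BIT(54) /* 0x200ul */
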
@@ -1042,7 +1042,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
 	/* external call pending and deliverable */
 	if (kvm_s390_ext_call_pending(vcpu) &&
 	    !psw_extint_disabled(vcpu) &&
-	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
+	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
 		return 1;
 
 	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
@@ -1062,7 +1062,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 	u64 cputm, sltime = 0;
 
 	if (ckc_interrupts_enabled(vcpu)) {
-		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
 			if ((s64)now < (s64)ckc)
 				sltime = tod_to_ns((s64)ckc - (s64)now);
 		} else if (now < ckc) {
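
__calculate_sltime() turns a TOD delta into nanoseconds via tod_to_ns(). A standalone sketch of that conversion as commonly implemented (one TOD clock unit is 2^-12 microseconds, so ns = tod * 1000 / 4096 = tod * 125 / 512; treat the exact split as an assumption):

    #include <stdint.h>

    /*
     * Sketch of tod_to_ns(): splitting the value into high and low parts
     * keeps the multiplication by 125 from overflowing 64 bits.
     */
    static uint64_t tod_to_ns_sketch(uint64_t todval)
    {
        return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
    }
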
@@ -791,11 +791,21 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
 
+void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_s390_vcpu_block_all(kvm);
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_s390_vcpu_crypto_setup(vcpu);
+
+	kvm_s390_vcpu_unblock_all(kvm);
+}
+
 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	if (!test_kvm_facility(kvm, 76))
 		return -EINVAL;
@@ -832,10 +842,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -ENXIO;
 	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_s390_vcpu_crypto_setup(vcpu);
-		exit_sie(vcpu);
-	}
+	kvm_s390_vcpu_crypto_reset_all(kvm);
 	mutex_unlock(&kvm->lock);
 	return 0;
 }
@@ -1033,8 +1040,8 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
 	return ret;
 }
 
-static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
-				       struct kvm_s390_vm_tod_clock *gtod)
+static void kvm_s390_get_tod_clock(struct kvm *kvm,
+				   struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_s390_tod_clock_ext htod;
 
@@ -1043,10 +1050,12 @@ static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	gtod->tod = htod.tod + kvm->arch.epoch;
-	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
-
-	if (gtod->tod < htod.tod)
-		gtod->epoch_idx += 1;
+	gtod->epoch_idx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
+		if (gtod->tod < htod.tod)
+			gtod->epoch_idx += 1;
+	}
 
 	preempt_enable();
 }
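
The hunk above computes the guest view of a multi-epoch TOD clock: the 64-bit addition may wrap, and the wrap is detected by the sum comparing lower than one addend, which then carries into the epoch index (only meaningful with facility 139). A standalone sketch of the same arithmetic:

    #include <stdint.h>

    struct tod_sketch {
        uint8_t  epoch_idx;  /* 8-bit epoch extension (facility 139) */
        uint64_t tod;        /* 64-bit TOD clock word */
    };

    /* Guest TOD = host TOD + epoch, with overflow carried into epoch_idx. */
    static struct tod_sketch guest_tod(uint64_t host_tod, uint8_t host_idx,
                                       uint64_t epoch, uint8_t epdx)
    {
        struct tod_sketch g;

        g.tod = host_tod + epoch;       /* may wrap modulo 2^64 */
        g.epoch_idx = host_idx + epdx;
        if (g.tod < host_tod)           /* wrapped, so carry out */
            g.epoch_idx += 1;
        return g;
    }
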
@@ -1056,12 +1065,7 @@ static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_vm_tod_clock gtod;
 
 	memset(&gtod, 0, sizeof(gtod));
-
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_get_tod_clock_ext(kvm, &gtod);
-	else
-		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);
-
+	kvm_s390_get_tod_clock(kvm, &gtod);
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
 
@@ -1493,7 +1497,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
 		return -EINVAL;
 
 	/* Is this guest using storage keys? */
-	if (!mm_use_skey(current->mm))
+	if (!mm_uses_skeys(current->mm))
 		return KVM_S390_GET_SKEYS_NONE;
 
 	/* Enforce sane limit on memory allocation */
@@ -1982,10 +1986,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	rc = -ENOMEM;
 
-	kvm->arch.use_esca = 0; /* start with basic SCA */
 	if (!sclp.has_64bscao)
 		alloc_flags |= GFP_DMA;
 	rwlock_init(&kvm->arch.sca_lock);
+	/* start with basic SCA */
 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
 	if (!kvm->arch.sca)
 		goto out_err;
@@ -2036,8 +2040,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_s390_crypto_init(kvm);
 
 	mutex_init(&kvm->arch.float_int.ais_lock);
-	kvm->arch.float_int.simm = 0;
-	kvm->arch.float_int.nimm = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -2063,11 +2065,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		kvm->arch.gmap->pfault_enabled = 0;
 	}
 
-	kvm->arch.css_support = 0;
-	kvm->arch.use_irqchip = 0;
 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
-	kvm->arch.epoch = 0;
-
+	kvm->arch.use_skf = sclp.has_skey;
 	spin_lock_init(&kvm->arch.start_stop_lock);
 	kvm_s390_vsie_init(kvm);
 	kvm_s390_gisa_init(kvm);
@@ -2433,8 +2432,12 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->ckc = 0UL;
 	vcpu->arch.sie_block->todpr = 0;
 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
-	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
-	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
+					CR0_INTERRUPT_KEY_SUBMASK |
+					CR0_MEASUREMENT_ALERT_SUBMASK;
+	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
+					CR14_UNUSED_33 |
+					CR14_EXTERNAL_DAMAGE_SUBMASK;
 	/* make sure the new fpc will be lazily loaded */
 	save_fpu_regs();
 	current->thread.fpu.fpc = 0;
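
The reset values are unchanged here, only spelled with named bits. Under the same bit-numbering convention as the CR0 sketch earlier, the constants OR together to exactly the old values (the bit positions below are worked back from the arithmetic, not quoted from a header):

    #define CR_BIT(n)                       (1UL << (63 - (n)))

    #define CR0_UNUSED_56                   CR_BIT(56) /* 0x80ul */
    #define CR0_INTERRUPT_KEY_SUBMASK       CR_BIT(57) /* 0x40ul */
    #define CR0_MEASUREMENT_ALERT_SUBMASK   CR_BIT(58) /* 0x20ul */
    /* 0x80 | 0x40 | 0x20 == 0xE0, the old gcr[0] reset value */

    #define CR14_UNUSED_32                  CR_BIT(32) /* 0x80000000ul */
    #define CR14_UNUSED_33                  CR_BIT(33) /* 0x40000000ul */
    #define CR14_EXTERNAL_DAMAGE_SUBMASK    CR_BIT(38) /* 0x02000000ul */
    /* 0x80000000 | 0x40000000 | 0x02000000 == 0xC2000000, the old gcr[14] value */
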
@@ -3192,7 +3195,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 		return 0;
 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
 		return 0;
-	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
+	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
 		return 0;
 	if (!vcpu->arch.gmap->pfault_enabled)
 		return 0;
@@ -3990,7 +3993,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	return r;
 }
 
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 #ifdef CONFIG_KVM_S390_UCONTROL
 	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
@@ -410,4 +410,17 @@ static inline int kvm_s390_use_sca_entries(void)
 }
 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
 				     struct mcck_volatile_info *mcck_info);
+
+/**
+ * kvm_s390_vcpu_crypto_reset_all
+ *
+ * Reset the crypto attributes for each vcpu. This can be done while the vcpus
+ * are running as each vcpu will be removed from SIE before resetting the crypt
+ * attributes and restored to SIE afterward.
+ *
+ * Note: The kvm->lock must be held while calling this function
+ *
+ * @kvm: the KVM guest
+ */
+void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);
 #endif
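
A hypothetical caller sketch for the interface documented above; everything named "example_" is invented for illustration, and per the comment kvm->lock must already be held around the call:

    /* Hypothetical kernel-context caller; "example_" names are invented. */
    static void example_update_guest_crypto(struct kvm *kvm)
    {
        mutex_lock(&kvm->lock);
        /* ... change kvm->arch.crypto state here ... */
        kvm_s390_vcpu_crypto_reset_all(kvm);
        mutex_unlock(&kvm->lock);
    }
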
@@ -204,24 +204,28 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
 
 int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
 {
-	int rc = 0;
+	int rc;
 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
 
 	trace_kvm_s390_skey_related_inst(vcpu);
-	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
+	/* Already enabled? */
+	if (vcpu->kvm->arch.use_skf &&
+	    !(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
 	    !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
-		return rc;
+		return 0;
 
 	rc = s390_enable_skey();
 	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
-	if (!rc) {
-		if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
-			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
-		else
-			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
-					     ICTL_RRBE);
-	}
-	return rc;
+	if (rc)
+		return rc;
+
+	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
+		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
+	if (!vcpu->kvm->arch.use_skf)
+		sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+	else
+		sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+	return 0;
 }
 
 static int try_handle_skey(struct kvm_vcpu *vcpu)
@@ -231,7 +235,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 	rc = kvm_s390_skey_check_enable(vcpu);
 	if (rc)
 		return rc;
-	if (sclp.has_skey) {
+	if (vcpu->kvm->arch.use_skf) {
 		/* with storage-key facility, SIE interprets it for us */
 		kvm_s390_retry_instr(vcpu);
 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
@@ -557,7 +557,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
 		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
 	if (gpa) {
-		if (!(gpa & ~0x1fffUL))
+		if (gpa < 2 * PAGE_SIZE)
 			rc = set_validity_icpt(scb_s, 0x0038U);
 		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
 			rc = set_validity_icpt(scb_s, 0x0011U);
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
 	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 	if (gpa && (scb_s->ecb & ECB_TE)) {
-		if (!(gpa & ~0x1fffUL)) {
+		if (gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x0080U);
 			goto unpin;
 		}
@@ -594,7 +594,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
 	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
 	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
-		if (!(gpa & ~0x1fffUL)) {
+		if (gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x1310U);
 			goto unpin;
 		}
@@ -613,7 +613,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
 	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
 	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
-		if (!(gpa & ~0x1fffUL)) {
+		if (gpa < 2 * PAGE_SIZE) {
 			rc = set_validity_icpt(scb_s, 0x0043U);
 			goto unpin;
 		}
@@ -632,7 +632,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
 	gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
 	sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
-	if (!gpa || !(gpa & ~0x1fffUL)) {
+	if (!gpa || gpa < 2 * PAGE_SIZE) {
 		rc = set_validity_icpt(scb_s, 0x10b0U);
 		goto unpin;
 	}
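
These vsie.c hunks replace a mask test with an explicit range compare. With the s390 4 KiB base page size the two forms accept exactly the same addresses; the rewrite just states the "falls into the low two pages" intent directly. A standalone check, illustrative only:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL  /* s390 base page size */

    int main(void)
    {
        /* sweep well past the 0x2000 boundary both tests care about */
        for (uint64_t gpa = 0; gpa < 0x10000; gpa++)
            assert((!(gpa & ~0x1fffUL)) == (gpa < 2 * PAGE_SIZE));
        return 0;
    }
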