Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "4.11 is going to be a relatively large release for KVM, with a little
  over 200 commits and noteworthy changes for most architectures.

  ARM:
   - GICv3 save/restore
   - cache flushing fixes
   - working MSI injection for GICv3 ITS
   - physical timer emulation

  MIPS:
   - various improvements under the hood
   - support for SMP guests
   - a large rewrite of MMU emulation. KVM MIPS can now use MMU
     notifiers to support copy-on-write, KSM, idle page tracking,
     swapping, ballooning and everything else. KVM_CAP_READONLY_MEM is
     also supported, so that writes to some memory regions can be
     treated as MMIO. The new MMU also paves the way for hardware
     virtualization support.

  PPC:
   - support for POWER9 using the radix-tree MMU for host and guest
   - resizable hashed page table
   - bugfixes.

  s390:
   - expose more features to the guest
   - more SIMD extensions
   - instruction execution protection
   - ESOP2

  x86:
   - improved hashing in the MMU
   - faster PageLRU tracking for Intel CPUs without EPT A/D bits
   - some refactoring of nested VMX entry/exit code, preparing for live
     migration support of nested hypervisors
   - expose yet another AVX512 CPUID bit
   - host-to-guest PTP support
   - refactoring of interrupt injection, with some optimizations thrown
     in and some duct tape removed.
   - remove lazy FPU handling
   - optimizations of user-mode exits
   - optimizations of vcpu_is_preempted() for KVM guests

  generic:
   - alternative signaling mechanism that doesn't pound on
     tsk->sighand->siglock"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (195 commits)
  x86/kvm: Provide optimized version of vcpu_is_preempted() for x86-64
  x86/paravirt: Change vcp_is_preempted() arg type to long
  KVM: VMX: use correct vmcs_read/write for guest segment selector/base
  x86/kvm/vmx: Defer TR reload after VM exit
  x86/asm/64: Drop __cacheline_aligned from struct x86_hw_tss
  x86/kvm/vmx: Simplify segment_base()
  x86/kvm/vmx: Get rid of segment_base() on 64-bit kernels
  x86/kvm/vmx: Don't fetch the TSS base from the GDT
  x86/asm: Define the kernel TSS limit in a macro
  kvm: fix page struct leak in handle_vmon
  KVM: PPC: Book3S HV: Disable HPT resizing on POWER9 for now
  KVM: Return an error code only as a constant in kvm_get_dirty_log()
  KVM: Return an error code only as a constant in kvm_get_dirty_log_protect()
  KVM: Return directly after a failed copy_from_user() in kvm_vm_compat_ioctl()
  KVM: x86: remove code for lazy FPU handling
  KVM: race-free exit from KVM_RUN without POSIX signals
  KVM: PPC: Book3S HV: Turn "KVM guest htab" message into a debug message
  KVM: PPC: Book3S PR: Ratelimit copy data failure error messages
  KVM: Support vCPU-based gfn->hva cache
  KVM: use separate generations for each address space
  ...
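The "race-free exit" item refers to the new KVM_CAP_IMMEDIATE_EXIT mechanism: rather than delivering a signal to yank the vCPU thread out of KVM_RUN, another thread sets a flag in the shared kvm_run structure and the next KVM_RUN returns before entering the guest. A minimal userspace sketch (VM/vCPU setup and the capability check are elided):

/*
 * Sketch of KVM_CAP_IMMEDIATE_EXIT usage. Assumes 'run' is the
 * mmap'ed struct kvm_run for this vCPU and the capability has been
 * enabled; error handling is reduced to the minimum.
 */
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
		if (errno == EINTR && run->immediate_exit)
			return 0;	/* another thread asked us to stop */
		return -1;		/* real error */
	}
	/* dispatch on run->exit_reason here */
	return 0;
}

Another thread requests the exit by storing 1 to run->immediate_exit (and clearing it again before the next normal KVM_RUN), with no signal and no siglock traffic involved.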
@@ -13,6 +13,10 @@ static char syscalls_ia32[] = {
 #include <asm/syscalls_32.h>
 };
 
+#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#include <asm/kvm_para.h>
+#endif
+
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
@@ -22,6 +26,11 @@ int main(void)
 	BLANK();
 #endif
 
+#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+	OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
+	BLANK();
+#endif
+
 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
 	ENTRY(bx);
 	ENTRY(cx);
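These two hunks exist so that the hand-written assembly later in the series can refer to offsetof(struct kvm_steal_time, preempted) symbolically: asm-offsets.c is compiled but never linked, and each OFFSET() marker is scraped by the build into the generated <asm/asm-offsets.h>. Assuming the 4.11-era layout of struct kvm_steal_time (an 8-byte steal counter, then 4-byte version and flags fields, then the preempted byte), the generated line would look roughly like:

/* generated into asm-offsets.h by the build; the value 16 is an
 * assumption derived from the layout described above */
#define KVM_STEAL_TIME_preempted 16 /* offsetof(struct kvm_steal_time, preempted) */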
@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/bitmap.h>
 #include <asm/syscalls.h>
+#include <asm/desc.h>
 
 /*
  * this changes the io permissions bitmap in the current task.
@@ -45,6 +46,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 		memset(bitmap, 0xff, IO_BITMAP_BYTES);
 		t->io_bitmap_ptr = bitmap;
 		set_thread_flag(TIF_IO_BITMAP);
+
+		preempt_disable();
+		refresh_TR();
+		preempt_enable();
 	}
 
 	/*
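ioperm() installs an I/O bitmap in the TSS, and the CPU only honors it if the cached TSS limit in TR covers the bitmap. Elsewhere in this series ("x86/kvm/vmx: Defer TR reload after VM exit"), VM exits are allowed to leave that limit truncated, so this hunk makes ioperm() restore it. The refresh_TR() helper itself lives in <asm/desc.h> and is not shown in the diff; a sketch of its likely shape, reconstructed as an assumption from the names used here:

/*
 * Sketch only -- reconstructed from the names in this series, not the
 * verbatim helper. Reload the task register only when a VM exit left
 * the cached TSS limit too small to cover the I/O bitmap.
 */
DECLARE_PER_CPU(bool, need_tr_refresh);

static inline void refresh_TR(void)
{
	WARN_ON_ONCE(preemptible());	/* hence the preempt_disable() above */

	if (unlikely(this_cpu_read(need_tr_refresh)))
		force_reload_TR();	/* assumed helper: rewrite the TSS
					 * descriptor and execute ltr */
}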
@@ -589,7 +589,8 @@ out:
 	local_irq_restore(flags);
 }
 
-__visible bool __kvm_vcpu_is_preempted(int cpu)
+#ifdef CONFIG_X86_32
+__visible bool __kvm_vcpu_is_preempted(long cpu)
 {
 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
 
@@ -597,6 +598,29 @@ __visible bool __kvm_vcpu_is_preempted(int cpu)
 }
 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
 
+#else
+
+#include <asm/asm-offsets.h>
+
+extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
+
+/*
+ * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
+ * restoring to/from the stack.
+ */
+asm(
+".pushsection .text;"
+".global __raw_callee_save___kvm_vcpu_is_preempted;"
+".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+"__raw_callee_save___kvm_vcpu_is_preempted:"
+"movq __per_cpu_offset(,%rdi,8), %rax;"
+"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+"setne %al;"
+"ret;"
+".popsection");
+
+#endif
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
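On 32-bit, PV_CALLEE_SAVE_REGS_THUNK() emits a wrapper that saves and restores every general-purpose register around the C function. The hand-written 64-bit thunk needs only %rdi (the cpu argument) and %rax (the return value), so it skips the eight 64-bit register saves entirely. This is also why the argument type changes from int to long throughout the series: the asm indexes __per_cpu_offset with the full 64-bit %rdi, so the parameter must be defined across all 64 bits. What the three instructions compute, written as plain C for clarity (a sketch; the real code stays in assembly precisely to control register usage):

/* C equivalent of the x86-64 thunk above (sketch only): */
__visible bool kvm_vcpu_is_preempted_equiv(long cpu)
{
	/* movq __per_cpu_offset(,%rdi,8), %rax  -> per-CPU base for 'cpu' */
	/* cmpb $0, steal_time+OFF(%rax); setne %al -> test the byte      */
	return per_cpu(steal_time, cpu).preempted != 0;
}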
@@ -28,6 +28,7 @@
 
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
+#include <asm/kvmclock.h>
 
 static int kvmclock __ro_after_init = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
@@ -49,6 +50,7 @@ struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
 {
 	return hv_clock;
 }
+EXPORT_SYMBOL_GPL(pvclock_pvti_cpu0_va);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
@@ -174,13 +176,14 @@ bool kvm_check_and_clear_guest_paused(void)
 	return ret;
 }
 
-static struct clocksource kvm_clock = {
+struct clocksource kvm_clock = {
 	.name = "kvm-clock",
 	.read = kvm_clock_get_cycles,
 	.rating = 400,
 	.mask = CLOCKSOURCE_MASK(64),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
+EXPORT_SYMBOL_GPL(kvm_clock);
 
 int kvm_register_clock(char *txt)
 {
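Exporting kvm_clock and pvclock_pvti_cpu0_va() makes the paravirtual clock reachable from module code; the in-tree consumer is the host-to-guest PTP support mentioned in the pull message. A hypothetical module-side sketch of what the exports enable:

/* Illustrative only: a GPL module sampling the kvm-clock clocksource
 * through the symbols this hunk exports. */
#include <asm/kvmclock.h>

static u64 sample_kvmclock(void)
{
	return kvm_clock.read(&kvm_clock);
}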
@@ -20,7 +20,7 @@ bool pv_is_native_spin_unlock(void)
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
-__visible bool __native_vcpu_is_preempted(int cpu)
+__visible bool __native_vcpu_is_preempted(long cpu)
 {
 	return false;
 }
@@ -32,6 +32,7 @@
 #include <asm/mce.h>
 #include <asm/vm86.h>
 #include <asm/switch_to.h>
+#include <asm/desc.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -64,6 +65,9 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
+DEFINE_PER_CPU(bool, need_tr_refresh);
+EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+
 /*
  * this gets called so that we can store lazy state into memory and copy the
  * current task into the new thread.
@@ -209,6 +213,12 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		 */
 		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
 		       max(prev->io_bitmap_max, next->io_bitmap_max));
+
+		/*
+		 * Make sure that the TSS limit is correct for the CPU
+		 * to notice the IO bitmap.
+		 */
+		refresh_TR();
 	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
 		/*
 		 * Clear any possible leftover bits:
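These hunks are the consumer side of the deferred TR reload: the context switch copies the I/O bitmap into the TSS and then makes sure TR's cached limit actually covers it. The producer side sits in the VMX exit path (not shown in these hunks): hardware reloads TR with a minimal limit on every VM exit, and rather than reloading TR eagerly there, the series just records the fact per-CPU. A sketch of that counterpart, with the helper name invented here for illustration:

/* Hypothetical name for the VMX-exit-path side of need_tr_refresh;
 * the real change presumably lives in the VMX code. */
static inline void note_tr_limit_truncated(void)
{
	/* The VM exit left TR's cached limit too small for the I/O
	 * bitmap; refresh_TR() repairs it lazily where it matters. */
	this_cpu_write(need_tr_refresh, true);
}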