Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:
 "ARM:
   - GICv4.1 support

   - 32bit host removal

  PPC:
   - secure (encrypted) guests running under the Protected Execution
     Framework ultravisor

  s390:
   - allow disabling GISA (hardware interrupt injection) and protected
     VMs/ultravisor support.

  x86:
   - New dirty bitmap flag that sets all bits in the bitmap when dirty
     page logging is enabled; this is faster because it doesn't require
     bulk modification of the page tables.

   - Initial work on making nested SVM event injection more similar to
     VMX, and less buggy.

   - Various cleanups to MMU code (though the big ones and related
      optimizations were delayed to 5.8). Instead of using "cr3" in
      function names, which occasionally really means an EPTP, KVM has
      standardized on "pgd" like the rest of the kernel.

   - A large refactoring of CPUID features, which now use an array that
     parallels the core x86_features.

   - Some removal of pointer chasing from kvm_x86_ops, which will also
     be switched to static calls as soon as they are available.

   - New Tigerlake CPUID features.

   - More bugfixes, optimizations and cleanups.

  Generic:
   - selftests: cleanups, new MMU notifier stress test, steal-time test

   - CSV output for kvm_stat"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (277 commits)
  x86/kvm: fix a missing-prototypes "vmread_error"
  KVM: x86: Fix BUILD_BUG() in __cpuid_entry_get_reg() w/ CONFIG_UBSAN=y
  KVM: VMX: Add a trampoline to fix VMREAD error handling
  KVM: SVM: Annotate svm_x86_ops as __initdata
  KVM: VMX: Annotate vmx_x86_ops as __initdata
  KVM: x86: Drop __exit from kvm_x86_ops' hardware_unsetup()
  KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
  KVM: x86: Set kvm_x86_ops only after ->hardware_setup() completes
  KVM: VMX: Configure runtime hooks using vmx_x86_ops
  KVM: VMX: Move hardware_setup() definition below vmx_x86_ops
  KVM: x86: Move init-only kvm_x86_ops to separate struct
  KVM: Pass kvm_init()'s opaque param to additional arch funcs
  s390/gmap: return proper error code on ksm unsharing
  KVM: selftests: Fix cosmetic copy-paste error in vm_mem_region_move()
  KVM: Fix out of range accesses to memslots
  KVM: X86: Micro-optimize IPI fastpath delay
  KVM: X86: Delay read msr data iff writes ICR MSR
  KVM: PPC: Book3S HV: Add a capability for enabling secure guests
  KVM: arm64: GICv4.1: Expose HW-based SGIs in debugfs
  KVM: arm64: GICv4.1: Allow non-trapping WFI when using HW SGIs
  ...
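
The CPUID refactoring mentioned in the x86 notes (and touched by the
__cpuid_entry_get_reg fix in the list above) keeps KVM's supported features
in an array of 32-bit words laid out like the kernel's x86_capability words.
A rough sketch of the idea, using simplified, hypothetical names rather than
the in-tree ones:

/* Hypothetical illustration only: the word/bit split mirrors how the
 * X86_FEATURE_* values encode "capability word * 32 + bit". */
#define NCAPWORDS	20	/* assumed number of 32-bit feature words */

static unsigned int kvm_cpu_caps_sketch[NCAPWORDS];

static int kvm_cpu_cap_has_sketch(unsigned int x86_feature)
{
	unsigned int word = x86_feature / 32;
	unsigned int bit  = x86_feature % 32;

	return !!(kvm_cpu_caps_sketch[word] & (1u << bit));
}
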
Linus Torvalds
2020-04-02 15:13:15 -07:00
206 changed files with 7874 additions and 9705 deletions


@@ -11,9 +11,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_KVM_ARM_HOST
#include <linux/kvm_host.h>
#endif
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
@@ -167,14 +164,6 @@ int main(void)
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
#endif
BLANK();
#ifdef CONFIG_VDSO
DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
#endif


@@ -189,19 +189,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
ENDPROC(__hyp_stub_install_secondary)
__hyp_stub_do_trap:
#ifdef ZIMAGE
teq r0, #HVC_SET_VECTORS
bne 1f
/* Only the ZIMAGE stubs can change the HYP vectors */
mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
b __hyp_stub_exit
#endif
1: teq r0, #HVC_SOFT_RESTART
bne 1f
bne 2f
bx r1
1: teq r0, #HVC_RESET_VECTORS
beq __hyp_stub_exit
ldr r0, =HVC_STUB_ERR
2: ldr r0, =HVC_STUB_ERR
__ERET
__hyp_stub_exit:
@@ -210,26 +210,9 @@ __hyp_stub_exit:
ENDPROC(__hyp_stub_do_trap)
/*
* __hyp_set_vectors: Call this after boot to set the initial hypervisor
* vectors as part of hypervisor installation. On an SMP system, this should
* be called on each CPU.
*
* r0 must be the physical address of the new vector table (which must lie in
* the bottom 4GB of physical address space.
*
* r0 must be 32-byte aligned.
*
* Before calling this, you must check that the stub hypervisor is installed
* everywhere, by waiting for any secondary CPUs to be brought up and then
* checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
*
* If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
* something else went wrong... in such cases, trying to install a new
* hypervisor is unlikely to work as desired.
*
* When you call into your shiny new hypervisor, sp_hyp will contain junk,
* so you will need to set that to something sensible at the new hypervisor's
* initialisation entry point.
* __hyp_set_vectors is only used when ZIMAGE must bounce between HYP
* and SVC. For the kernel itself, the vectors are set once and for
* all by the stubs.
*/
ENTRY(__hyp_set_vectors)
mov r1, r0
@@ -245,12 +228,6 @@ ENTRY(__hyp_soft_restart)
ret lr
ENDPROC(__hyp_soft_restart)
ENTRY(__hyp_reset_vectors)
mov r0, #HVC_RESET_VECTORS
__HVC(0)
ret lr
ENDPROC(__hyp_reset_vectors)
#ifndef ZIMAGE
.align 2
.L__boot_cpu_mode_offset:


@@ -162,14 +162,6 @@ SECTIONS
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")
#ifdef CONFIG_XIP_DEFLATED_DATA
/*
* The .bss is used as a stack area for __inflate_kernel_data() whose stack


@@ -170,12 +170,4 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")
#endif /* CONFIG_XIP_KERNEL */


@@ -31,20 +31,11 @@
*(.proc.info.init) \
__proc_info_end = .;
#define HYPERVISOR_TEXT \
__hyp_text_start = .; \
*(.hyp.text) \
__hyp_text_end = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
__idmap_text_start = .; \
*(.idmap.text) \
__idmap_text_end = .; \
. = ALIGN(PAGE_SIZE); \
__hyp_idmap_text_start = .; \
*(.hyp.idmap.text) \
__hyp_idmap_text_end = .;
#define ARM_DISCARD \
*(.ARM.exidx.exit.text) \
@@ -72,7 +63,6 @@
SCHED_TEXT \
CPUIDLE_TEXT \
LOCK_TEXT \
HYPERVISOR_TEXT \
KPROBES_TEXT \
*(.gnu.warning) \
*(.glue_7) \