Merge tag 'kvm-4.16-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:
 "ARM:
   - icache invalidation optimizations, improving VM startup time
   - support for forwarded level-triggered interrupts, improving
     performance for timers and passthrough platform devices
   - a small fix for power-management notifiers, and some cosmetic changes

  PPC:
   - add MMIO emulation for vector loads and stores
   - allow HPT guests to run on a radix host on POWER9 v2.2 CPUs without
     requiring the complex thread synchronization of older CPU versions
   - improve the handling of escalation interrupts with the XIVE
     interrupt controller
   - support decrement register migration
   - various cleanups and bugfixes.

  s390:
   - Cornelia Huck passed maintainership to Janosch Frank
   - exitless interrupts for emulated devices
   - cleanup of cpuflag handling
   - kvm_stat counter improvements
   - VSIE improvements
   - mm cleanup

  x86:
   - hypervisor part of SEV
   - UMIP, RDPID, and MSR_SMI_COUNT emulation
   - paravirtualized TLB shootdown using the new KVM_VCPU_PREEMPTED bit
   - allow guests to see TOPOEXT, GFNI, VAES, VPCLMULQDQ, and more AVX512
     features
   - show vcpu id in its anonymous inode name
   - many fixes and cleanups
   - per-VCPU MSR bitmaps (already merged through x86/pti branch)
   - stable KVM clock when nesting on Hyper-V (merged through x86/hyperv)"

* tag 'kvm-4.16-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (197 commits)
  KVM: PPC: Book3S: Add MMIO emulation for VMX instructions
  KVM: PPC: Book3S HV: Branch inside feature section
  KVM: PPC: Book3S HV: Make HPT resizing work on POWER9
  KVM: PPC: Book3S HV: Fix handling of secondary HPTEG in HPT resizing code
  KVM: PPC: Book3S PR: Fix broken select due to misspelling
  KVM: x86: don't forget vcpu_put() in kvm_arch_vcpu_ioctl_set_sregs()
  KVM: PPC: Book3S PR: Fix svcpu copying with preemption enabled
  KVM: PPC: Book3S HV: Drop locks before reading guest memory
  kvm: x86: remove efer_reload entry in kvm_vcpu_stat
  KVM: x86: AMD Processor Topology Information
  x86/kvm/vmx: do not use vm-exit instruction length for fast MMIO when running nested
  kvm: embed vcpu id to dentry of vcpu anon inode
  kvm: Map PFN-type memory regions as writable (if possible)
  x86/kvm: Make it compile on 32bit and with HYPYERVISOR_GUEST=n
  KVM: arm/arm64: Fixup userspace irqchip static key optimization
  KVM: arm/arm64: Fix userspace_irqchip_in_use counting
  KVM: arm/arm64: Fix incorrect timer_is_pending logic
  MAINTAINERS: update KVM/s390 maintainers
  MAINTAINERS: add Halil as additional vfio-ccw maintainer
  MAINTAINERS: add David as a reviewer for KVM/s390
  ...
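Of the x86 changes, the paravirtualized TLB shootdown is the one implemented by the guest-side diff below. It communicates through the preempted byte of struct kvm_steal_time, which the host shares with each vCPU. For reference, the flag values from the 4.16 x86 uapi kvm_para.h:

        /* Flag bits in kvm_steal_time.preempted */
        #define KVM_VCPU_PREEMPTED      (1 << 0)  /* set by the host while the vCPU is scheduled out */
        #define KVM_VCPU_FLUSH_TLB      (1 << 1)  /* set by a remote vCPU to request a flush on re-entry */

A guest that finds KVM_VCPU_PREEMPTED set can skip the flush IPI for that vCPU and set KVM_VCPU_FLUSH_TLB instead; the host then flushes the TLB before the vCPU runs again.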
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -556,6 +556,51 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 	}
 }
 
+static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+{
+	u64 msr;
+
+	/*
+	 * BIOS support is required for SME and SEV.
+	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
+	 *	      the SME physical address space reduction value.
+	 *	      If BIOS has not enabled SME then don't advertise the
+	 *	      SME feature (set in scattered.c).
+	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
+	 *	      SEV feature (set in scattered.c).
+	 *
+	 *   In all cases, since support for SME and SEV requires long mode,
+	 *   don't advertise the feature under CONFIG_X86_32.
+	 */
+	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
+		/* Check if memory encryption is enabled */
+		rdmsrl(MSR_K8_SYSCFG, msr);
+		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+			goto clear_all;
+
+		/*
+		 * Always adjust physical address bits. Even though this
+		 * will be a value above 32-bits this is still done for
+		 * CONFIG_X86_32 so that accurate values are reported.
+		 */
+		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+
+		if (IS_ENABLED(CONFIG_X86_32))
+			goto clear_all;
+
+		rdmsrl(MSR_K7_HWCR, msr);
+		if (!(msr & MSR_K7_HWCR_SMMLOCK))
+			goto clear_sev;
+
+		return;
+
+clear_all:
+		clear_cpu_cap(c, X86_FEATURE_SME);
+clear_sev:
+		clear_cpu_cap(c, X86_FEATURE_SEV);
+	}
+}
+
 static void early_init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
@@ -627,26 +672,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	if (cpu_has_amd_erratum(c, amd_erratum_400))
 		set_cpu_bug(c, X86_BUG_AMD_E400);
 
-	/*
-	 * BIOS support is required for SME. If BIOS has enabled SME then
-	 * adjust x86_phys_bits by the SME physical address space reduction
-	 * value. If BIOS has not enabled SME then don't advertise the
-	 * feature (set in scattered.c). Also, since the SME support requires
-	 * long mode, don't advertise the feature under CONFIG_X86_32.
-	 */
-	if (cpu_has(c, X86_FEATURE_SME)) {
-		u64 msr;
-
-		/* Check if SME is enabled */
-		rdmsrl(MSR_K8_SYSCFG, msr);
-		if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
-			c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
-			if (IS_ENABLED(CONFIG_X86_32))
-				clear_cpu_cap(c, X86_FEATURE_SME);
-		} else {
-			clear_cpu_cap(c, X86_FEATURE_SME);
-		}
-	}
+	early_detect_mem_encrypt(c);
 }
 
 static void init_amd_k8(struct cpuinfo_x86 *c)
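The CPUID leaf consumed by early_detect_mem_encrypt() is 0x8000001f: EAX bit 0 advertises SME, EAX bit 1 advertises SEV, and EBX bits 11:6 carry the physical-address-size reduction subtracted above. As a stand-alone illustration (not part of the patch), the same leaf can be probed from user space:

        #include <stdio.h>
        #include <cpuid.h>

        int main(void)
        {
                unsigned int eax, ebx, ecx, edx;

                /* Bail out if the extended leaf is not implemented. */
                if (__get_cpuid_max(0x80000000, NULL) < 0x8000001f)
                        return 1;

                __get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
                printf("SME: %u  SEV: %u  phys-addr reduction: %u bits\n",
                       eax & 1, (eax >> 1) & 1, (ebx >> 6) & 0x3f);
                return 0;
        }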
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -251,6 +251,12 @@ static void __init ms_hyperv_init_platform(void)
 	hyperv_setup_mmu_ops();
 	/* Setup the IDT for hypervisor callback */
 	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
+	/* Setup the IDT for reenlightenment notifications */
+	if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT)
+		alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
+				hyperv_reenlightenment_vector);
+
 #endif
 }
 
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -30,6 +30,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_SME,		CPUID_EAX,  0, 0x8000001f, 0 },
+	{ X86_FEATURE_SEV,		CPUID_EAX,  1, 0x8000001f, 0 },
 	{ 0, 0, 0, 0, 0 }
 };
 
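Each cpuid_bits[] entry reads { feature, CPUID output register, bit, leaf, sub-leaf }, so the added line means: X86_FEATURE_SEV is bit 1 of EAX in leaf 0x8000001f. Roughly, the scan in init_scattered_cpuid_features() evaluates each entry like this (a sketch of the existing logic, not new code):

        u32 regs[4];

        cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX], &regs[CPUID_EBX],
                    &regs[CPUID_ECX], &regs[CPUID_EDX]);
        if (regs[cb->reg] & (1 << cb->bit))
                set_cpu_cap(c, cb->feature);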
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -141,6 +141,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 			   irq_stats(j)->irq_hv_callback_count);
 		seq_puts(p, "  Hypervisor callback interrupts\n");
 	}
 #endif
+#if IS_ENABLED(CONFIG_HYPERV)
+	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
+		seq_printf(p, "%*s: ", prec, "HRE");
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ",
+				   irq_stats(j)->irq_hv_reenlightenment_count);
+		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
+	}
+#endif
 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
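With this hunk, a guest that has received reenlightenment interrupts gains an HRE row in /proc/interrupts. Given the format strings above, the output would look roughly like this (illustrative values, four vCPUs):

         HRE:          2          0          1          0   Hyper-V reenlightenment interrupts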
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -498,6 +498,34 @@ static void __init kvm_apf_trap_init(void)
 	update_intr_gate(X86_TRAP_PF, async_page_fault);
 }
 
+static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
+
+static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+			const struct flush_tlb_info *info)
+{
+	u8 state;
+	int cpu;
+	struct kvm_steal_time *src;
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+
+	cpumask_copy(flushmask, cpumask);
+	/*
+	 * We have to call flush only on online vCPUs. And
+	 * queue flush_on_enter for pre-empted vCPUs
+	 */
+	for_each_cpu(cpu, flushmask) {
+		src = &per_cpu(steal_time, cpu);
+		state = READ_ONCE(src->preempted);
+		if ((state & KVM_VCPU_PREEMPTED)) {
+			if (try_cmpxchg(&src->preempted, &state,
+					state | KVM_VCPU_FLUSH_TLB))
+				__cpumask_clear_cpu(cpu, flushmask);
+		}
+	}
+
+	native_flush_tlb_others(flushmask, info);
+}
+
 static void __init kvm_guest_init(void)
 {
 	int i;
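The try_cmpxchg() is what keeps the hint safe: between the READ_ONCE() and the update, the host may clear KVM_VCPU_PREEMPTED because the vCPU woke up; in that case the CAS fails, the CPU stays in flushmask, and a real flush IPI is sent. A user-space analogue of the pattern with C11 atomics (an illustrative sketch only):

        #include <stdatomic.h>
        #include <stdbool.h>

        #define KVM_VCPU_PREEMPTED      (1 << 0)
        #define KVM_VCPU_FLUSH_TLB      (1 << 1)

        /* Queue a deferred flush only if the target is still preempted; on a
         * concurrent change the CAS fails and the caller falls back to an IPI. */
        static bool queue_flush_if_preempted(_Atomic unsigned char *preempted)
        {
                unsigned char state = atomic_load(preempted);

                return (state & KVM_VCPU_PREEMPTED) &&
                       atomic_compare_exchange_strong(preempted, &state,
                                (unsigned char)(state | KVM_VCPU_FLUSH_TLB));
        }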
@@ -517,6 +545,9 @@ static void __init kvm_guest_init(void)
 		pv_time_ops.steal_clock = kvm_steal_clock;
 	}
 
+	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH))
+		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
+
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
@@ -598,6 +629,22 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
+static __init int kvm_setup_pv_tlb_flush(void)
+{
+	int cpu;
+
+	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) {
+		for_each_possible_cpu(cpu) {
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu));
+		}
+		pr_info("KVM setup pv remote TLB flush\n");
+	}
+
+	return 0;
+}
+arch_initcall(kvm_setup_pv_tlb_flush);
+
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */
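kvm_para_has_feature() boils down to a CPUID query: leaf 0x40000001 (KVM_CPUID_FEATURES) returns the KVM feature bits in EAX, where KVM_FEATURE_PV_TLB_FLUSH is bit 9 in the 4.16 uapi header. A user-space check inside a KVM guest could look like this sketch (hypervisor leaves are not covered by __get_cpuid_max(), so the leaf is queried directly):

        #include <stdio.h>
        #include <cpuid.h>

        #define KVM_CPUID_FEATURES       0x40000001
        #define KVM_FEATURE_PV_TLB_FLUSH 9   /* bit number, per the 4.16 uapi header */

        int main(void)
        {
                unsigned int eax, ebx, ecx, edx;

                /* Assumes we run under KVM; check CPUID.1:ECX[31] first if unsure. */
                __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
                printf("PV TLB flush %ssupported\n",
                       (eax & (1u << KVM_FEATURE_PV_TLB_FLUSH)) ? "" : "not ");
                return 0;
        }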
@@ -643,7 +690,7 @@ __visible bool __kvm_vcpu_is_preempted(long cpu)
 {
 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
 
-	return !!src->preempted;
+	return !!(src->preempted & KVM_VCPU_PREEMPTED);
 }
 PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);