Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini: "Small release, the most interesting stuff is x86 nested virt improvements. x86: - userspace can now hide nested VMX features from guests - nested VMX can now run Hyper-V in a guest - support for AVX512_4VNNIW and AVX512_FMAPS in KVM - infrastructure support for virtual Intel GPUs. PPC: - support for KVM guests on POWER9 - improved support for interrupt polling - optimizations and cleanups. s390: - two small optimizations, more stuff is in flight and will be in 4.11. ARM: - support for the GICv3 ITS on 32bit platforms" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (94 commits) arm64: KVM: pmu: Reset PMSELR_EL0.SEL to a sane value before entering the guest KVM: arm/arm64: timer: Check for properly initialized timer on init KVM: arm/arm64: vgic-v2: Limit ITARGETSR bits to number of VCPUs KVM: x86: Handle the kthread worker using the new API KVM: nVMX: invvpid handling improvements KVM: nVMX: check host CR3 on vmentry and vmexit KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry KVM: nVMX: propagate errors from prepare_vmcs02 KVM: nVMX: fix CR3 load if L2 uses PAE paging and EPT KVM: nVMX: load GUEST_EFER after GUEST_CR0 during emulated VM-entry KVM: nVMX: generate MSR_IA32_CR{0,4}_FIXED1 from guest CPUID KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation KVM: nVMX: support restore of VMX capability MSRs KVM: nVMX: generate non-true VMX MSRs based on true versions KVM: x86: Do not clear RFLAGS.TF when a singlestep trap occurs. KVM: x86: Add kvm_skip_emulated_instruction and use it. KVM: VMX: Move skip_emulated_instruction out of nested_vmx_check_vmcs12 KVM: VMX: Reorder some skip_emulated_instruction calls KVM: x86: Add a return value to kvm_emulate_cpuid KVM: PPC: Book3S: Move prototypes for KVM functions into kvm_ppc.h ...
This commit is contained in:
@@ -70,16 +70,19 @@ MODULE_AUTHOR("Qumranet");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
/* Architectures should define their poll value according to the halt latency */
|
||||
static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
|
||||
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
|
||||
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
|
||||
EXPORT_SYMBOL_GPL(halt_poll_ns);
|
||||
|
||||
/* Default doubles per-vcpu halt_poll_ns. */
|
||||
static unsigned int halt_poll_ns_grow = 2;
|
||||
unsigned int halt_poll_ns_grow = 2;
|
||||
module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);
|
||||
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
|
||||
|
||||
/* Default resets per-vcpu halt_poll_ns . */
|
||||
static unsigned int halt_poll_ns_shrink;
|
||||
unsigned int halt_poll_ns_shrink;
|
||||
module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
|
||||
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
|
||||
|
||||
/*
|
||||
* Ordering of locks:
|
||||
@@ -595,7 +598,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
|
||||
stat_data->kvm = kvm;
|
||||
stat_data->offset = p->offset;
|
||||
kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
|
||||
if (!debugfs_create_file(p->name, 0444,
|
||||
if (!debugfs_create_file(p->name, 0644,
|
||||
kvm->debugfs_dentry,
|
||||
stat_data,
|
||||
stat_fops_per_vm[p->kind]))
|
||||
@@ -3669,11 +3672,23 @@ static int vm_stat_get_per_vm(void *data, u64 *val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Clear a single VM-wide stat counter via its per-VM debugfs file.
 *
 * @data: struct kvm_stat_data identifying the VM and the byte offset of
 *        the counter inside struct kvm.
 * @val:  value written by userspace; only 0 is accepted.
 *
 * Returns 0 on success, -EINVAL if userspace wrote anything other than 0.
 */
static int vm_stat_clear_per_vm(void *data, u64 val)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

	/* Writing any non-zero value is not a valid "clear" request. */
	if (val)
		return -EINVAL;

	/* The counter lives at a raw byte offset inside struct kvm. */
	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;

	return 0;
}
|
||||
|
||||
static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
__simple_attr_check_format("%llu\n", 0ull);
|
||||
return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
|
||||
NULL, "%llu\n");
|
||||
vm_stat_clear_per_vm, "%llu\n");
|
||||
}
|
||||
|
||||
static const struct file_operations vm_stat_get_per_vm_fops = {
|
||||
@@ -3699,11 +3714,26 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Clear a per-vCPU stat counter on every vCPU of one VM.
 *
 * @data: struct kvm_stat_data identifying the VM and the byte offset of
 *        the counter inside each struct kvm_vcpu.
 * @val:  value written by userspace; only 0 is accepted.
 *
 * Returns 0 on success, -EINVAL if userspace wrote anything other than 0.
 */
static int vcpu_stat_clear_per_vm(void *data, u64 val)
{
	int i;
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
	struct kvm_vcpu *vcpu;

	/* Only a write of 0 means "clear". */
	if (val)
		return -EINVAL;

	/* Zero the counter at the same byte offset in every vCPU. */
	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
		*(u64 *)((void *)vcpu + stat_data->offset) = 0;

	return 0;
}
|
||||
|
||||
static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
__simple_attr_check_format("%llu\n", 0ull);
|
||||
return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
|
||||
NULL, "%llu\n");
|
||||
vcpu_stat_clear_per_vm, "%llu\n");
|
||||
}
|
||||
|
||||
static const struct file_operations vcpu_stat_get_per_vm_fops = {
|
||||
@@ -3738,7 +3768,26 @@ static int vm_stat_get(void *_offset, u64 *val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
|
||||
/*
 * Clear a VM-wide stat counter across ALL VMs in the system (backs the
 * global debugfs stat file, see DEFINE_SIMPLE_ATTRIBUTE below).
 *
 * @_offset: byte offset of the counter inside struct kvm, smuggled
 *           through the void * cookie as an integer.
 * @val:     value written by userspace; only 0 is accepted.
 *
 * Returns 0 on success, -EINVAL if userspace wrote anything other than 0.
 */
static int vm_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};

	if (val)
		return -EINVAL;

	/* Hold kvm_lock so VMs cannot be added/removed while we walk vm_list. */
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		/* Reuse the per-VM clear helper; 0 is the only valid value. */
		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
	}
	spin_unlock(&kvm_lock);

	return 0;
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
|
||||
|
||||
static int vcpu_stat_get(void *_offset, u64 *val)
|
||||
{
|
||||
@@ -3758,7 +3807,27 @@ static int vcpu_stat_get(void *_offset, u64 *val)
|
||||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
|
||||
/*
 * Clear a per-vCPU stat counter on every vCPU of every VM in the system
 * (backs the global debugfs stat file, see DEFINE_SIMPLE_ATTRIBUTE below).
 *
 * @_offset: byte offset of the counter inside struct kvm_vcpu, smuggled
 *           through the void * cookie as an integer.
 * @val:     value written by userspace; only 0 is accepted.
 *
 * Returns 0 on success, -EINVAL if userspace wrote anything other than 0.
 */
static int vcpu_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};

	if (val)
		return -EINVAL;

	/* Hold kvm_lock so VMs cannot be added/removed while we walk vm_list. */
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		/* Reuse the per-VM clear helper; 0 is the only valid value. */
		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
	}
	spin_unlock(&kvm_lock);

	return 0;
}
|
||||
|
||||
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
|
||||
"%llu\n");
|
||||
|
||||
static const struct file_operations *stat_fops[] = {
|
||||
[KVM_STAT_VCPU] = &vcpu_stat_fops,
|
||||
@@ -3776,7 +3845,7 @@ static int kvm_init_debug(void)
|
||||
|
||||
kvm_debugfs_num_entries = 0;
|
||||
for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
|
||||
if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
|
||||
if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
|
||||
(void *)(long)p->offset,
|
||||
stat_fops[p->kind]))
|
||||
goto out_dir;
|
||||
|
Reference in New Issue
Block a user