Merge branch 'queue' into next
Merge patches queued during the run-up to the merge window.

* queue: (25 commits)
  KVM: Choose better candidate for directed yield
  KVM: Note down when cpu relax intercepted or pause loop exited
  KVM: Add config to support ple or cpu relax optimzation
  KVM: switch to symbolic name for irq_states size
  KVM: x86: Fix typos in pmu.c
  KVM: x86: Fix typos in lapic.c
  KVM: x86: Fix typos in cpuid.c
  KVM: x86: Fix typos in emulate.c
  KVM: x86: Fix typos in x86.c
  KVM: SVM: Fix typos
  KVM: VMX: Fix typos
  KVM: remove the unused parameter of gfn_to_pfn_memslot
  KVM: remove is_error_hpa
  KVM: make bad_pfn static to kvm_main.c
  KVM: using get_fault_pfn to get the fault pfn
  KVM: MMU: track the refcount when unmap the page
  KVM: x86: remove unnecessary mark_page_dirty
  KVM: MMU: Avoid handling same rmap_pde in kvm_handle_hva_range()
  KVM: MMU: Push trace_kvm_age_page() into kvm_age_rmapp()
  KVM: MMU: Add memslot parameter to hva handlers
  ...

Signed-off-by: Avi Kivity <avi@redhat.com>
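The heaviest change in this queue is the mmu.c rework of kvm_handle_hva(): it is generalized into kvm_handle_hva_range(), which clamps the requested hva range against each memslot, converts it to a gfn range, and walks the rmaps for every page size. As a quick orientation, here is a small self-contained sketch of that clamping and conversion step; everything in it (struct memslot, hva_to_gfn, max_ul, min_ul, the example addresses) is a simplified stand-in for illustration, not the KVM code itself.

/*
 * Illustrative sketch only -- NOT kernel code.  It mirrors the per-memslot
 * clamping and hva->gfn conversion done by the new kvm_handle_hva_range()
 * in the mmu.c hunks below, using simplified stand-in types and helpers
 * (struct memslot, hva_to_gfn, max_ul, min_ul) in place of the kernel's
 * kvm_memory_slot, hva_to_gfn_memslot, max() and min().
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef unsigned long long gfn_t;

struct memslot {
	unsigned long userspace_addr;	/* host virtual address of slot start */
	unsigned long npages;		/* slot length in pages */
	gfn_t base_gfn;			/* first guest frame number of the slot */
};

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

/* simplified stand-in for hva_to_gfn_memslot() */
static gfn_t hva_to_gfn(unsigned long hva, const struct memslot *slot)
{
	return ((hva - slot->userspace_addr) >> PAGE_SHIFT) + slot->base_gfn;
}

int main(void)
{
	struct memslot slot = { .userspace_addr = 0x40000000UL,
				.npages = 256, .base_gfn = 0x1000 };
	unsigned long start = 0x40010000UL, end = 0x40030000UL;

	/* clamp the requested hva range to the part covered by this slot */
	unsigned long hva_start = max_ul(start, slot.userspace_addr);
	unsigned long hva_end   = min_ul(end, slot.userspace_addr +
					      (slot.npages << PAGE_SHIFT));
	if (hva_start >= hva_end)
		return 0;	/* this slot does not intersect [start, end) */

	/* pages intersecting [hva_start, hva_end) => gfns [gfn_start, gfn_end) */
	gfn_t gfn_start = hva_to_gfn(hva_start, &slot);
	gfn_t gfn_end   = hva_to_gfn(hva_end + PAGE_SIZE - 1, &slot);

	printf("gfn range: 0x%llx .. 0x%llx\n", gfn_start, gfn_end);
	return 0;
}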
@@ -37,6 +37,7 @@ config KVM
 	select TASK_DELAY_ACCT
 	select PERF_EVENTS
 	select HAVE_KVM_MSI
+	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions. You will need a fairly recent
@@ -316,7 +316,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	}
 	case 7: {
 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-		/* Mask ebx against host capbability word 9 */
+		/* Mask ebx against host capability word 9 */
 		if (index == 0) {
 			entry->ebx &= kvm_supported_word9_x86_features;
 			cpuid_mask(&entry->ebx, 9);
@@ -642,7 +642,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 				goto bad;
 		} else {
-			/* exapand-down segment */
+			/* expand-down segment */
 			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
@@ -1383,7 +1383,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	err_code = selector & 0xfffc;
 	err_vec = GP_VECTOR;

-	/* can't load system descriptor into segment selecor */
+	/* can't load system descriptor into segment selector */
 	if (seg <= VCPU_SREG_GS && !seg_desc.s)
 		goto exception;

@@ -2398,7 +2398,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

 	/*
-	 * Now load segment descriptors. If fault happenes at this stage
+	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
 	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
@@ -2640,7 +2640,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	 *
 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
 	 * 2. Exception/IRQ/iret: No check is performed
-	 * 3. jmp/call to TSS: Check agains DPL of the TSS
+	 * 3. jmp/call to TSS: Check against DPL of the TSS
 	 */
 	if (reason == TASK_SWITCH_GATE) {
 		if (idt_index != -1) {
@@ -2681,7 +2681,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

 	/* set back link to prev task only if NT bit is set in eflags
-	   note that old_tss_sel is not used afetr this point */
+	   note that old_tss_sel is not used after this point */
 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
 		old_tss_sel = 0xffff;

@@ -70,7 +70,7 @@ struct kvm_pic {
 	struct kvm_io_device dev_slave;
 	struct kvm_io_device dev_eclr;
 	void (*ack_notifier)(void *opaque, int irq);
-	unsigned long irq_states[16];
+	unsigned long irq_states[PIC_NUM_PINS];
 };

 struct kvm_pic *kvm_create_pic(struct kvm *kvm);
@@ -719,7 +719,7 @@ static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 {
 	unsigned char alignment = offset & 0xf;
 	u32 result;
-	/* this bitmask has a bit cleared for each reserver register */
+	/* this bitmask has a bit cleared for each reserved register */
 	static const u64 rmask = 0x43ff01ffffffe70cULL;

 	if ((alignment + len) > 4) {
@@ -792,7 +792,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 	atomic_set(&apic->lapic_timer.pending, 0);

 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
-		/* lapic timer in oneshot or peroidic mode */
+		/* lapic timer in oneshot or periodic mode */
 		now = apic->lapic_timer.timer.base->get_time();
 		apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT)
 			    * APIC_BUS_CYCLE_NS * apic->divide_count;
@@ -556,6 +556,14 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 		return 0;

 	pfn = spte_to_pfn(old_spte);
+
+	/*
+	 * KVM does not hold the refcount of the page used by
+	 * kvm mmu, before reclaiming the page, we should
+	 * unmap it from mmu first.
+	 */
+	WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
@@ -960,13 +968,13 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 				    struct kvm_memory_slot *slot)
 {
-	struct kvm_lpage_info *linfo;
+	unsigned long idx;

 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];

-	linfo = lpage_info_slot(gfn, slot, level);
-	return &linfo->rmap_pde;
+	idx = gfn_to_index(gfn, slot->base_gfn, level);
+	return &slot->arch.rmap_pde[level - PT_DIRECTORY_LEVEL][idx];
 }

 /*
@@ -1200,7 +1208,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 }

 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			   unsigned long data)
+			   struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1218,7 +1226,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 }

 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			     unsigned long data)
+			     struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1259,43 +1267,67 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	return 0;
 }

-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-			  unsigned long data,
-			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-					 unsigned long data))
+static int kvm_handle_hva_range(struct kvm *kvm,
+				unsigned long start,
+				unsigned long end,
+				unsigned long data,
+				int (*handler)(struct kvm *kvm,
+					       unsigned long *rmapp,
+					       struct kvm_memory_slot *slot,
+					       unsigned long data))
 {
 	int j;
-	int ret;
-	int retval = 0;
+	int ret = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;

 	slots = kvm_memslots(kvm);

 	kvm_for_each_memslot(memslot, slots) {
-		unsigned long start = memslot->userspace_addr;
-		unsigned long end;
+		unsigned long hva_start, hva_end;
+		gfn_t gfn_start, gfn_end;

-		end = start + (memslot->npages << PAGE_SHIFT);
-		if (hva >= start && hva < end) {
-			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
-			gfn_t gfn = memslot->base_gfn + gfn_offset;
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+		/*
+		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+		 */
+		gfn_start = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

-			ret = handler(kvm, &memslot->rmap[gfn_offset], data);
+		for (j = PT_PAGE_TABLE_LEVEL;
+		     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
+			unsigned long idx, idx_end;
+			unsigned long *rmapp;

-			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-				struct kvm_lpage_info *linfo;
+			/*
+			 * {idx(page_j) | page_j intersects with
+			 *  [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
+			 */
+			idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
+			idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);

-				linfo = lpage_info_slot(gfn, memslot,
-							PT_DIRECTORY_LEVEL + j);
-				ret |= handler(kvm, &linfo->rmap_pde, data);
-			}
-			trace_kvm_age_page(hva, memslot, ret);
-			retval |= ret;
+			rmapp = __gfn_to_rmap(gfn_start, j, memslot);
+
+			for (; idx <= idx_end; ++idx)
+				ret |= handler(kvm, rmapp++, memslot, data);
 		}
 	}

-	return retval;
+	return ret;
 }

+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+			  unsigned long data,
+			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+					 struct kvm_memory_slot *slot,
+					 unsigned long data))
+{
+	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
+}
+
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1303,13 +1335,18 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
 }

+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+}
+
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
 	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }

 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			 unsigned long data)
+			 struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator uninitialized_var(iter);
@@ -1323,8 +1360,10 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	 * This has some overhead, but not as much as the cost of swapping
 	 * out actively used pages or breaking up actively used hugepages.
 	 */
-	if (!shadow_accessed_mask)
-		return kvm_unmap_rmapp(kvm, rmapp, data);
+	if (!shadow_accessed_mask) {
+		young = kvm_unmap_rmapp(kvm, rmapp, slot, data);
+		goto out;
+	}

 	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
 	     sptep = rmap_get_next(&iter)) {
@@ -1336,12 +1375,14 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 				  (unsigned long *)sptep);
 		}
 	}

+out:
+	/* @data has hva passed to kvm_age_hva(). */
+	trace_kvm_age_page(data, slot, young);
 	return young;
 }

 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-			      unsigned long data)
+			      struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
@@ -1379,13 +1420,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)

 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

-	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
+	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
 }

 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
+	return kvm_handle_hva(kvm, hva, hva, kvm_age_rmapp);
 }

 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
@@ -2472,14 +2513,12 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	unsigned long hva;

 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
-	if (!slot) {
-		get_page(fault_page);
-		return page_to_pfn(fault_page);
-	}
+	if (!slot)
+		return get_fault_pfn();

 	hva = gfn_to_hva_memslot(slot, gfn);

-	return hva_to_pfn_atomic(vcpu->kvm, hva);
+	return hva_to_pfn_atomic(hva);
 }

 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
@@ -1,5 +1,5 @@
 /*
- * Kernel-based Virtual Machine -- Performane Monitoring Unit support
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
  *
  * Copyright 2011 Red Hat, Inc. and/or its affiliates.
  *
@@ -2063,7 +2063,7 @@ static inline bool nested_svm_intr(struct vcpu_svm *svm)
 	if (svm->nested.intercept & 1ULL) {
 		/*
 		 * The #vmexit can't be emulated here directly because this
-		 * code path runs with irqs and preemtion disabled. A
+		 * code path runs with irqs and preemption disabled. A
 		 * #vmexit emulation might sleep. Only signal request for
 		 * the #vmexit here.
 		 */
@@ -2409,7 +2409,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 {
 	/*
 	 * This function merges the msr permission bitmaps of kvm and the
-	 * nested vmcb. It is omptimized in that it only merges the parts where
+	 * nested vmcb. It is optimized in that it only merges the parts where
 	 * the kvm msr permission bitmap may contain zero bits
 	 */
 	int i;
@@ -1343,7 +1343,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	guest_efer = vmx->vcpu.arch.efer;

 	/*
-	 * NX is emulated; LMA and LME handled by hardware; SCE meaninless
+	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
 	 * outside long mode
 	 */
 	ignore_bits = EFER_NX | EFER_SCE;
@@ -3261,7 +3261,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 	 * qemu binaries.
 	 * IA32 arch specifies that at the time of processor reset the
 	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
-	 * is setting it to 0 in the usedland code. This causes invalid guest
+	 * is setting it to 0 in the userland code. This causes invalid guest
 	 * state vmexit when "unrestricted guest" mode is turned on.
 	 * Fix for this setup issue in cpu_reset is being pushed in the qemu
 	 * tree. Newer qemu binaries with that qemu fix would not need this
@@ -4446,7 +4446,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }

-/* called to set cr0 as approriate for a mov-to-cr0 exit. */
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	if (to_vmx(vcpu)->nested.vmxon &&
@@ -1093,7 +1093,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * For each generation, we track the original measured
 	 * nanosecond time, offset, and write, so if TSCs are in
 	 * sync, we can match exact offset, and if not, we can match
-	 * exact software computaion in compute_guest_tsc()
+	 * exact software computation in compute_guest_tsc()
 	 *
 	 * These values are tracked in kvm->arch.cur_xxx variables.
 	 */
@@ -1500,7 +1500,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
 	gpa_t gpa = data & ~0x3f;

-	/* Bits 2:5 are resrved, Should be zero */
+	/* Bits 2:5 are reserved, Should be zero */
 	if (data & 0x3c)
 		return 1;

@@ -1723,7 +1723,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		 * Ignore all writes to this no longer documented MSR.
 		 * Writes are only relevant for old K7 processors,
 		 * all pre-dating SVM, but a recommended workaround from
-		 * AMD for these chips. It is possible to speicify the
+		 * AMD for these chips. It is possible to specify the
 		 * affected processor models on the command line, hence
 		 * the need to ignore the workaround.
 		 */
@@ -2632,7 +2632,6 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 	if (!vcpu->arch.time_page)
 		return -EINVAL;
 	src->flags |= PVCLOCK_GUEST_STOPPED;
-	mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 	return 0;
 }
@@ -4492,7 +4491,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)

 	/*
 	 * if emulation was due to access to shadowed page table
-	 * and it failed try to unshadow page and re-entetr the
+	 * and it failed try to unshadow page and re-enter the
 	 * guest to let CPU execute the instruction.
 	 */
 	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
@@ -5588,7 +5587,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 		/*
 		 * We are here if userspace calls get_regs() in the middle of
 		 * instruction emulation. Registers state needs to be copied
-		 * back from emulation context to vcpu. Usrapace shouldn't do
+		 * back from emulation context to vcpu. Userspace shouldn't do
 		 * that usually, but some bad designed PV devices (vmware
 		 * backdoor interface) need this to work
 		 */
@@ -6117,7 +6116,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	 * as we reset last_host_tsc on all VCPUs to stop this from being
 	 * called multiple times (one for each physical CPU bringup).
 	 *
-	 * Platforms with unnreliable TSCs don't have to deal with this, they
+	 * Platforms with unreliable TSCs don't have to deal with this, they
 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
 	 * catchup mode. This will catchup all VCPUs to real time, but cannot
 	 * guarantee that they stay in perfect synchronization.
@@ -6314,6 +6313,10 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 	int i;

 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		if (!dont || free->arch.rmap_pde[i] != dont->arch.rmap_pde[i]) {
+			kvm_kvfree(free->arch.rmap_pde[i]);
+			free->arch.rmap_pde[i] = NULL;
+		}
 		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
 			kvm_kvfree(free->arch.lpage_info[i]);
 			free->arch.lpage_info[i] = NULL;
@@ -6333,6 +6336,11 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
 				      slot->base_gfn, level) + 1;

+		slot->arch.rmap_pde[i] =
+			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap_pde[i]));
+		if (!slot->arch.rmap_pde[i])
+			goto out_free;
+
 		slot->arch.lpage_info[i] =
 			kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
 		if (!slot->arch.lpage_info[i])
@@ -6361,7 +6369,9 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)

 out_free:
 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+		kvm_kvfree(slot->arch.rmap_pde[i]);
 		kvm_kvfree(slot->arch.lpage_info[i]);
+		slot->arch.rmap_pde[i] = NULL;
 		slot->arch.lpage_info[i] = NULL;
 	}
 	return -ENOMEM;
@@ -6381,7 +6391,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		map_flags = MAP_SHARED | MAP_ANONYMOUS;

 	/*To keep backward compatibility with older userspace,
-	 *x86 needs to hanlde !user_alloc case.
+	 *x86 needs to handle !user_alloc case.
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {