Merge commit 'linus/master' into merge-linus
Conflicts: arch/x86/kvm/i8254.c
arch/ia64/kvm/Kconfig
@@ -46,4 +46,6 @@ config KVM_INTEL
+config KVM_TRACE
+	bool
 
 source drivers/virtio/Kconfig
 
 endif # VIRTUALIZATION
arch/ia64/kvm/Makefile
@@ -44,7 +44,11 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o)
+		coalesced_mmio.o irq_comm.o)
+
+ifeq ($(CONFIG_DMAR),y)
+common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
arch/ia64/kvm/irq.h (new file, 31 lines)
@@ -0,0 +1,31 @@
+/*
+ * irq.h: In-kernel interrupt controller related definitions
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 1;
+}
+
+#endif
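For context: on ia64 the ioapic is always emulated in the kernel, so
irqchip_in_kernel() is a constant, whereas the x86 version tests whether a
virtual ioapic was actually created for the VM. A minimal C sketch of the
kind of guard this header serves in common KVM code (the helper name is
hypothetical, not something this merge adds):

static int example_deliver_irq(struct kvm *kvm)
{
	if (!irqchip_in_kernel(kvm))	/* always true on ia64 */
		return -ENXIO;		/* userspace would have to emulate */
	return 0;			/* route through the in-kernel ioapic */
}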
arch/ia64/kvm/kvm-ia64.c
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
 #include <asm/gcc_intrin.h>
@@ -38,12 +39,14 @@
 #include <asm/cacheflush.h>
 #include <asm/div64.h>
 #include <asm/tlb.h>
+#include <asm/elf.h>
 
 #include "misc.h"
 #include "vti.h"
 #include "iodev.h"
 #include "ioapic.h"
 #include "lapic.h"
+#include "irq.h"
 
 static unsigned long kvm_vmm_base;
 static unsigned long kvm_vsa_base;
@@ -61,12 +64,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
-
-struct fdesc{
-	unsigned long ip;
-	unsigned long gp;
-};
-
 static void kvm_flush_icache(unsigned long start, unsigned long len)
 {
 	int l;
@@ -184,12 +181,16 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_USER_MEMORY:
+	case KVM_CAP_MP_STATE:
 
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+	case KVM_CAP_IOMMU:
+		r = intel_iommu_found();
+		break;
 	default:
 		r = 0;
 	}
@@ -776,6 +777,7 @@ static void kvm_init_vm(struct kvm *kvm)
 	 */
 	kvm_build_io_pmt(kvm);
 
+	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -1339,6 +1341,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+	kvm_iommu_unmap_guest(kvm);
+#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
+	kvm_free_all_assigned_devices(kvm);
+#endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
@@ -1440,17 +1446,24 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		int user_alloc)
 {
 	unsigned long i;
-	struct page *page;
+	unsigned long pfn;
 	int npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
 	for (i = 0; i < npages; i++) {
-		page = gfn_to_page(kvm, base_gfn + i);
-		kvm_set_pmt_entry(kvm, base_gfn + i,
-				page_to_pfn(page) << PAGE_SHIFT,
-				_PAGE_AR_RWX|_PAGE_MA_WB);
-		memslot->rmap[i] = (unsigned long)page;
+		pfn = gfn_to_pfn(kvm, base_gfn + i);
+		if (!kvm_is_mmio_pfn(pfn)) {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					pfn << PAGE_SHIFT,
+					_PAGE_AR_RWX | _PAGE_MA_WB);
+			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
+		} else {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
+					_PAGE_MA_UC);
+			memslot->rmap[i] = 0;
+		}
 	}
 
 	return 0;
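The rewritten loop separates RAM-backed frames from MMIO frames: normal
memory keeps a cacheable write-back mapping, while MMIO ranges are tagged
GPFN_PHYS_MMIO, mapped uncacheable, and get no struct page recorded in
rmap. A sketch of the two pmt-entry shapes, as an illustrative composition
of the arguments shown above rather than code from the diff:

static u64 example_pmt_entry(u64 pfn, int is_mmio)
{
	if (!is_mmio)	/* RAM: read/write/execute, write-back cacheable */
		return (pfn << PAGE_SHIFT) | _PAGE_AR_RWX | _PAGE_MA_WB;
	/* MMIO: tagged as physical MMIO and mapped uncacheable */
	return GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT) | _PAGE_MA_UC;
}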
@@ -1794,11 +1807,43 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	vcpu_load(vcpu);
+	mp_state->mp_state = vcpu->arch.mp_state;
+	vcpu_put(vcpu);
+	return 0;
+}
+
+static int vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	int r;
+	long psr;
+	local_irq_save(psr);
+	r = kvm_insert_vmm_mapping(vcpu);
+	if (r)
+		goto fail;
+
+	vcpu->arch.launched = 0;
+	kvm_arch_vcpu_uninit(vcpu);
+	r = kvm_arch_vcpu_init(vcpu);
+	if (r)
+		goto fail;
+
+	kvm_purge_vmm_mapping(vcpu);
+	r = 0;
+fail:
+	local_irq_restore(psr);
+	return r;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	int r = 0;
+
+	vcpu_load(vcpu);
+	vcpu->arch.mp_state = mp_state->mp_state;
+	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
+		r = vcpu_reset(vcpu);
+	vcpu_put(vcpu);
+	return r;
 }
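With this change KVM_CAP_MP_STATE is advertised (see the check_extension
hunk earlier) and the two vcpu ioctls stop returning -EINVAL. A minimal
userspace sketch of the round trip; vcpu_fd is assumed to be a descriptor
obtained via KVM_CREATE_VCPU:

#include <sys/ioctl.h>
#include <linux/kvm.h>

int roundtrip_mp_state(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;	/* failed with EINVAL before this patch */

	/* writing back KVM_MP_STATE_UNINITIALIZED now triggers vcpu_reset() */
	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}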
arch/ia64/kvm/kvm_minstate.h
@@ -50,27 +50,18 @@
 
 #define PAL_VSA_SYNC_READ						\
 	/* begin to call pal vps sync_read */				\
+{.mii;									\
 	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
-	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
-	;;								\
-	ld8 r25 = [r25];	/* read vpd base */			\
-	ld8 r20 = [r20];						\
-	;;								\
-	add r20 = PAL_VPS_SYNC_READ,r20;				\
-	;;								\
-{ .mii;								\
 	nop 0x0;							\
-	mov r24 = ip;							\
-	mov b0 = r20;							\
+	mov r24=ip;							\
 	;;								\
-};									\
-{ .mmb;								\
-	add r24 = 0x20, r24;						\
-	nop 0x0;							\
-	br.cond.sptk b0;	/* call the service */			\
+}									\
+{.mmb									\
+	add r24=0x20, r24;						\
+	ld8 r25 = [r25];	/* read vpd base */			\
+	br.cond.sptk kvm_vps_sync_read;	/*call the service*/		\
 	;;								\
 };									\
 
-
 
 
 #define KVM_MINSTATE_GET_CURRENT(reg)	mov reg=r21
arch/ia64/kvm/optvfault.S
@@ -1,9 +1,12 @@
 /*
- * arch/ia64/vmx/optvfault.S
+ * arch/ia64/kvm/optvfault.S
  * optimize virtualization fault handler
  *
  * Copyright (C) 2006 Intel Co
  *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Copyright (C) 2008 Intel Co
+ * Add the support for Tukwila processors.
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -20,6 +23,98 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
+#define VMX_VPS_SYNC_READ				\
+	add r16=VMM_VPD_BASE_OFFSET,r21;		\
+	mov r17 = b0;					\
+	mov r18 = r24;					\
+	mov r19 = r25;					\
+	mov r20 = r31;					\
+	;;						\
+{.mii;							\
+	ld8 r16 = [r16];				\
+	nop 0x0;					\
+	mov r24 = ip;					\
+	;;						\
+};							\
+{.mmb;							\
+	add r24=0x20, r24;				\
+	mov r25 =r16;					\
+	br.sptk.many kvm_vps_sync_read;			\
+};							\
+	mov b0 = r17;					\
+	mov r24 = r18;					\
+	mov r25 = r19;					\
+	mov r31 = r20
+
+ENTRY(kvm_vps_entry)
+	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r29 = [r29]
+	;;
+	add r29 = r29, r30
+	;;
+	mov b0 = r29
+	br.sptk.many b0
+END(kvm_vps_entry)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_read)
+	movl r30 = PAL_VPS_SYNC_READ
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_read)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_write)
+	movl r30 = PAL_VPS_SYNC_WRITE
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_write)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_resume_normal)
+	movl r30 = PAL_VPS_RESUME_NORMAL
+	;;
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *	r17 : isr
+ */
+GLOBAL_ENTRY(kvm_vps_resume_handler)
+	movl r30 = PAL_VPS_RESUME_HANDLER
+	;;
+	ld8 r27=[r25]
+	shr r17=r17,IA64_ISR_IR_BIT
+	;;
+	dep r27=r17,r27,63,1	// bit 63 of r27 indicate whether enable CFLE
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_handler)
+
 //mov r1=ar3
 GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 #ifndef ACCE_MOV_FROM_AR
@@ -157,11 +252,11 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 #ifndef ACCE_RSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;
@@ -196,7 +291,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	tbit.nz p6,p0=r23,0
 	;;
 	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-	(p6) br.dptk kvm_resume_to_guest
+	(p6) br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r26=VMM_VCPU_META_RR0_OFFSET,r21
 	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
@@ -212,7 +307,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	mov rr[r28]=r27
 	;;
 	srlz.d
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_rsm)
 
 
@@ -221,11 +316,11 @@ GLOBAL_ENTRY(kvm_asm_ssm)
 #ifndef ACCE_SSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
+	VMX_VPS_SYNC_READ
+	;;
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;	//r26 is imm24
@@ -271,7 +366,7 @@ kvm_asm_ssm_1:
 	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
 	;;
 	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-	(p6) br.dptk kvm_resume_to_guest
+	(p6) br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -286,7 +381,7 @@ kvm_asm_ssm_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_ssm)
 
 
@@ -295,10 +390,9 @@ GLOBAL_ENTRY(kvm_asm_mov_to_psr)
 #ifndef ACCE_MOV_TO_PSR
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	extr.u r26=r25,13,7 //r2
+	VMX_VPS_SYNC_READ
 	;;
-	ld8 r16=[r16]
+	extr.u r26=r25,13,7 //r2
 	addl r20=@gprel(asm_mov_from_reg),gp
 	;;
 	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
@@ -374,7 +468,7 @@ kvm_asm_mov_to_psr_1:
 	;;
 	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
 	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-	(p6) br.dpnt.few kvm_resume_to_guest
+	(p6) br.dpnt.few kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -389,13 +483,29 @@ kvm_asm_mov_to_psr_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_mov_to_psr)
 
 
 ENTRY(kvm_asm_dispatch_vexirq)
 //increment iip
+	mov r17 = b0
+	mov r18 = r31
+{.mii
+	add r25=VMM_VPD_BASE_OFFSET,r21
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 = 0x20, r24
+	ld8 r25 = [r25]
+	br.sptk.many kvm_vps_sync_write
+}
+	mov b0 =r17
 	mov r16=cr.ipsr
+	mov r31 = r18
+	mov r19 = 37
 	;;
 	extr.u r17=r16,IA64_PSR_RI_BIT,2
 	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
@@ -435,25 +545,31 @@ GLOBAL_ENTRY(kvm_asm_thash)
 	;;
 kvm_asm_thash_back1:
 	shr.u r23=r19,61		// get RR number
-	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
+	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
 	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
 	;;
-	shladd r27=r23,3,r25	// get vcpu->arch.vrr[r23]'s addr
+	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
 	ld8 r17=[r16]		// get PTA
 	mov r26=1
 	;;
 	extr.u r29=r17,2,6	// get pta.size
-	ld8 r25=[r27]		// get vcpu->arch.vrr[r23]'s value
+	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
 	;;
-	extr.u r25=r25,2,6	// get rr.ps
 	mov b0=r24
+	//Fallback to C if pta.vf is set
+	tbit.nz p6,p0=r17, 8
+	;;
+	(p6) mov r24=EVENT_THASH
+	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
+	extr.u r28=r28,2,6	// get rr.ps
 	shl r22=r26,r29		// 1UL << pta.size
 	;;
-	shr.u r23=r19,r25	// vaddr >> rr.ps
+	shr.u r23=r19,r28	// vaddr >> rr.ps
 	adds r26=3,r29		// pta.size + 3
 	shl r27=r17,3		// pta << 3
 	;;
 	shl r23=r23,3		// (vaddr >> rr.ps) << 3
 	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
 	movl r16=7<<61
 	;;
 	adds r22=-1,r22		// (1UL << pta.size) - 1
@@ -724,6 +840,29 @@ END(asm_mov_from_reg)
  * r31: pr
  * r24: b0
  */
+ENTRY(kvm_resume_to_guest_with_sync)
+	adds r19=VMM_VPD_BASE_OFFSET,r21
+	mov r16 = r31
+	mov r17 = r24
+	;;
+{.mii
+	ld8 r25 =[r19]
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 =0x20, r24
+	nop 0x0
+	br.sptk.many kvm_vps_sync_write
+}
+
+	mov r31 = r16
+	mov r24 =r17
+	;;
+	br.sptk.many kvm_resume_to_guest
+END(kvm_resume_to_guest_with_sync)
+
 ENTRY(kvm_resume_to_guest)
 	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
 	;;
arch/ia64/kvm/process.c
@@ -962,9 +962,9 @@ static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
 void vmm_transition(struct kvm_vcpu *vcpu)
 {
 	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
 	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	kvm_do_resume_op(vcpu);
 }
arch/ia64/kvm/vcpu.h
@@ -313,21 +313,21 @@ static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
 	trp->rid = rid;
 }
 
 extern u64 kvm_lookup_mpa(u64 gpfn);
 extern u64 kvm_gpa_to_mpa(u64 gpa);
-
-/* Return I/O type if trye */
-#define __gpfn_is_io(gpfn)			\
-	({					\
-		u64 pte, ret = 0;		\
-		pte = kvm_lookup_mpa(gpfn);	\
-		if (!(pte & GPFN_INV_MASK))	\
-			ret = pte & GPFN_IO_MASK;	\
-		ret;				\
-	})
+extern u64 kvm_get_mpt_entry(u64 gpfn);
+
+/* Return I/ */
+static inline u64 __gpfn_is_io(u64 gpfn)
+{
+	u64 pte;
+	pte = kvm_get_mpt_entry(gpfn);
+	if (!(pte & GPFN_INV_MASK)) {
+		pte = pte & GPFN_IO_MASK;
+		if (pte != GPFN_PHYS_MMIO)
+			return pte;
+	}
+	return 0;
+}
 #endif
 
 #define IA64_NO_FAULT	0
 #define IA64_FAULT	1
arch/ia64/kvm/vmm_ivt.S
@@ -1261,11 +1261,6 @@ kvm_rse_clear_invalid:
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
 	ld8 r19=[r19]		//vpsr
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
-	;;
-	ld8 r20=[r20]
-	;;
-//vsa_sync_write_start
 	mov r25=r18
 	adds r16= VMM_VCPU_GP_OFFSET,r21
 	;;
@@ -1274,10 +1269,7 @@ kvm_rse_clear_invalid:
 	;;
 	add r24=r24,r16
 	;;
-	add r16=PAL_VPS_SYNC_WRITE,r20
-	;;
-	mov b0=r16
-	br.cond.sptk b0		// call the service
+	br.sptk.many kvm_vps_sync_write	// call the service
 	;;
 END(ia64_leave_hypervisor)
 	// fall through
@@ -1288,28 +1280,15 @@ GLOBAL_ENTRY(ia64_vmm_entry)
  *  r17:cr.isr
  *  r18:vpd
  *  r19:vpsr
- *  r20:__vsa_base
  *  r22:b0
  *  r23:predicate
  */
 	mov r24=r22
 	mov r25=r18
 	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
+	(p1) br.cond.sptk.few kvm_vps_resume_normal
+	(p2) br.cond.sptk.many kvm_vps_resume_handler
 	;;
-	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-	(p1) br.sptk.many ia64_vmm_entry_out
-	;;
-	tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT	//p1=cr.isr.ir
-	;;
-	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-	(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
-	(p2) ld8 r26=[r25]
-	;;
-ia64_vmm_entry_out:
-	mov pr=r23,-2
-	mov b0=r29
-	;;
-	br.cond.sptk b0		// call pal service
 END(ia64_vmm_entry)
 
 
@@ -1376,6 +1355,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	//set up ipsr, iip, vpd.vpsr, dcr
 	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
 	// For DCR: all bits 0
+	bsw.0
+	;;
+	mov r21 =r13
 	adds r14=-VMM_PT_REGS_SIZE, r12
 	;;
 	movl r6=0x501008826000		// IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
@@ -1387,12 +1369,6 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	;;
 	srlz.i
 	;;
-	bsw.0
-	;;
-	mov r21 =r13
-	;;
-	bsw.1
-	;;
 	mov ar.rsc = 0
 	;;
 	flushrs
@@ -1406,12 +1382,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	ld8 r1 = [r20]
 	;;
 	mov cr.iip=r4
-	;;
 	adds r16=VMM_VPD_BASE_OFFSET,r13
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
 	;;
 	ld8 r18=[r16]
-	ld8 r20=[r20]
 	;;
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
arch/ia64/kvm/vtlb.c
@@ -390,7 +390,7 @@ void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
 
 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 {
-	u64 ps, ps_mask, paddr, maddr;
+	u64 ps, ps_mask, paddr, maddr, io_mask;
 	union pte_flags phy_pte;
 
 	ps = itir_ps(itir);
@@ -398,8 +398,9 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
 	phy_pte.val = *pte;
 	paddr = *pte;
 	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
-	maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
-	if (maddr & GPFN_IO_MASK) {
+	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
+	io_mask = maddr & GPFN_IO_MASK;
+	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
 		*pte |= VTLB_PTE_IO;
 		return -1;
 	}
@@ -418,7 +419,7 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 			u64 ifa, int type)
 {
 	u64 ps;
-	u64 phy_pte;
+	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
 	int ret = 0;
 
@@ -426,13 +427,16 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	vrr.val = vcpu_get_rr(v, ifa);
 	mrr.val = ia64_get_rr(ifa);
 
+	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
 	phy_pte = translate_phy_pte(&pte, itir, ifa);
 
 	/* Ensure WB attribute if pte is related to a normal mem page,
 	 * which is required by vga acceleration since qemu maps shared
 	 * vram buffer with WB.
 	 */
-	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
+			io_mask != GPFN_PHYS_MMIO) {
 		pte &= ~_PAGE_MA_MASK;
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
@@ -566,12 +570,19 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
-u64 kvm_lookup_mpa(u64 gpfn)
+u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
 	return *(base + gpfn);
 }
 
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+	u64 maddr;
+	maddr = kvm_get_mpt_entry(gpfn);
+	return maddr&_PAGE_PPN_MASK;
+}
+
 u64 kvm_gpa_to_mpa(u64 gpa)
 {
 	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
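For orientation, not part of the patch: a p2m entry returned by
kvm_get_mpt_entry() carries GPFN attribute bits (GPFN_INV_MASK,
GPFN_IO_MASK) on top of the machine frame number, and kvm_lookup_mpa()
now simply masks those bits off. A sketch of how a guest-physical address
would translate to a machine address by composing the new helpers; the
function itself is illustrative:

u64 example_gpa_to_mpa(u64 gpa)
{
	u64 entry = kvm_get_mpt_entry(gpa >> PAGE_SHIFT);	/* raw p2m entry */
	u64 mpa = (entry & _PAGE_PPN_MASK) | (gpa & (PAGE_SIZE - 1));

	return mpa;	/* machine frame plus the original page offset */
}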