Merge tag 'kvm-arm-for-v4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into next
KVM/ARM Changes for v4.9

- Various cleanups and removal of redundant code
- Two important fixes for not using an in-kernel irqchip
- A bit of optimizations
- Handle SError exceptions and present them to guests if appropriate
- Proxying of GICV access at EL2 if guest mappings are unsafe
- GICv3 on AArch32 on ARMv8
- Preparations for GICv3 save/restore, including ABI docs
@@ -21,13 +21,16 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+obj-y += $(KVM)/arm/aarch32.o

 obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
 obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
 obj-y += $(KVM)/irqchip.o
 obj-y += $(KVM)/arm/arch_timer.o
@@ -1188,6 +1188,10 @@ static int init_common_resources(void)
 		return -ENOMEM;
 	}

+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
 	return 0;
 }

@@ -1253,10 +1257,6 @@ static void teardown_hyp_mode(void)

 static int init_vhe_mode(void)
 {
-	/* set size of VMID supported by CPU */
-	kvm_vmid_bits = kvm_get_vmid_bits();
-	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
 	kvm_info("VHE mode initialized successfully\n");
 	return 0;
 }
@@ -1340,10 +1340,6 @@ static int init_hyp_mode(void)
 		}
 	}

-	/* set size of VMID supported by CPU */
-	kvm_vmid_bits = kvm_get_vmid_bits();
-	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
 	kvm_info("Hyp mode initialized successfully\n")

 	return 0;
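The VMID width logged here bounds how many guests can hold live TLB entries at once; everything else is derived from a mask. A purely illustrative piece of arithmetic, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int kvm_vmid_bits = 8;	/* what kvm_get_vmid_bits() reports */
	uint64_t vmid_mask = (1ULL << kvm_vmid_bits) - 1;

	/* Once allocation wraps past the mask, VMIDs are recycled and
	 * the TLBs must be flushed. */
	printf("%u-bit VMID: %llu distinct IDs, mask %#llx\n",
	       kvm_vmid_bits, 1ULL << kvm_vmid_bits,
	       (unsigned long long)vmid_mask);
	return 0;
}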
@@ -228,6 +228,35 @@ bool access_vm_reg(struct kvm_vcpu *vcpu,
 	return true;
 }

+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+			   const struct coproc_params *p,
+			   const struct coproc_reg *r)
+{
+	u64 reg;
+
+	if (!p->is_write)
+		return read_from_write_only(vcpu, p);
+
+	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
+	reg |= *vcpu_reg(vcpu, p->Rt1);
+
+	vgic_v3_dispatch_sgi(vcpu, reg);
+
+	return true;
+}
+
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+			   const struct coproc_params *p,
+			   const struct coproc_reg *r)
+{
+	if (p->is_write)
+		return ignore_write(vcpu, p);
+
+	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+
+	return true;
+}
+
 /*
  * We could trap ID_DFR0 and tell the guest we don't support performance
  * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
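ICC_SGI1R is a 64-bit register, accessed from AArch32 through a pair of 32-bit GPRs: Rt1 carries the low word and Rt2 the high word, exactly as access_gic_sgi() recombines them. A minimal stand-alone sketch of that recombination, with made-up register values rather than KVM's types:

#include <stdint.h>
#include <stdio.h>

/* Model the Rt1/Rt2 recombination done by access_gic_sgi(); the
 * values below are examples only. */
int main(void)
{
	uint32_t rt1 = 0x0000ffff;	/* low word: target list, SGI ID, ... */
	uint32_t rt2 = 0x00000001;	/* high word: affinity fields */
	uint64_t sgi1r;

	sgi1r = (uint64_t)rt2 << 32;	/* Rt2 -> bits [63:32] */
	sgi1r |= rt1;			/* Rt1 -> bits [31:0] */

	printf("ICC_SGI1R = %#018llx\n", (unsigned long long)sgi1r);
	return 0;
}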
@@ -361,10 +390,16 @@ static const struct coproc_reg cp15_regs[] = {
 	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
 			access_vm_reg, reset_unknown, c10_AMAIR1},
+
+	/* ICC_SGI1R */
+	{ CRm64(12), Op1( 0), is64, access_gic_sgi},

 	/* VBAR: swapped by interrupt.S. */
 	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
 			NULL, reset_val, c12_VBAR, 0x00000000 },
+
+	/* ICC_SRE */
+	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

 	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
 	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
 			access_vm_reg, reset_val, c13_CID, 0x00000000 },
@@ -161,105 +161,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }

-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
-
-	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/**
- * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
- * @vcpu: The VCPU pointer
- *
- * When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
- * to do this little bit of work manually. The fields map like this:
- *
- * IT[7:0] -> CPSR[26:25],CPSR[15:10]
- */
-static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
-{
-	unsigned long itbits, cond;
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_arm = !(cpsr & PSR_T_BIT);
-
-	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
-
-	if (!(cpsr & PSR_IT_MASK))
-		return;
-
-	cond = (cpsr & 0xe000) >> 13;
-	itbits = (cpsr & 0x1c00) >> (10 - 2);
-	itbits |= (cpsr & (0x3 << 25)) >> 25;
-
-	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
-	if ((itbits & 0x7) == 0)
-		itbits = cond = 0;
-	else
-		itbits = (itbits << 1) & 0x1f;
-
-	cpsr &= ~PSR_IT_MASK;
-	cpsr |= cond << 13;
-	cpsr |= (itbits & 0x1c) << (10 - 2);
-	cpsr |= (itbits & 0x3) << 25;
-	*vcpu_cpsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_skip_instr - skip a trapped instruction and proceed to the next
- * @vcpu: The vcpu pointer
- */
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-	bool is_thumb;
-
-	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
-	if (is_thumb && !is_wide_instr)
-		*vcpu_pc(vcpu) += 2;
-	else
-		*vcpu_pc(vcpu) += 4;
-	kvm_adjust_itstate(vcpu);
-}
-
-
 /******************************************************************************
  * Inject exceptions into the guest
  */
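This conditional-execution and ITSTATE machinery (removed here because it moves to shared AArch32 code) is pure bit twiddling, so it can be exercised outside the kernel. A stand-alone sketch of the two operations removed above; the CPSR layout follows the code itself and the sample value is made up:

#include <stdint.h>
#include <stdio.h>

/* CPSR[26:25] and CPSR[15:10] hold IT[1:0] and IT[7:2] respectively;
 * this mirrors the kernel's PSR_IT_MASK. */
#define IT_MASK		((0x3u << 25) | (0x3fu << 10))

/* Rebuild IT[7:0] from its split CPSR encoding (kvm_condition_valid). */
static unsigned int cpsr_to_it(uint32_t cpsr)
{
	return ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
}

/* ITAdvance (ARM DDI 0406C, page A-52), as in kvm_adjust_itstate(). */
static uint32_t it_advance(uint32_t cpsr)
{
	uint32_t cond, itbits;

	if (!(cpsr & IT_MASK))
		return cpsr;		/* not inside an IT block */

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	if ((itbits & 0x7) == 0)	/* last instruction in the block */
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	return cpsr;
}

int main(void)
{
	/* Made-up CPSR value encoding an in-progress IT block. */
	uint32_t cpsr = (0x7u << 13) | (0x1u << 10) | (0x1u << 25);

	printf("IT before: %#04x\n", cpsr_to_it(cpsr));
	cpsr = it_advance(cpsr);
	printf("IT after:  %#04x\n", cpsr_to_it(cpsr));
	return 0;
}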
@@ -402,3 +303,15 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	inject_abt(vcpu, true, addr);
 }
+
+/**
+ * kvm_inject_vabt - inject an async abort / SError into the guest
+ * @vcpu: The VCPU to receive the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+{
+	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
+}
@@ -28,14 +28,6 @@

 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
@@ -59,22 +51,6 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }

-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
-		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
-		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
-	return -EFAULT;
-}
-
 /**
  * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
  * @vcpu: the vcpu pointer
@@ -112,13 +88,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
 	[HSR_EC_HVC]		= handle_hvc,
 	[HSR_EC_SMC]		= handle_smc,
 	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
 	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
 };

 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
@@ -144,6 +117,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
 	exit_handle_fn exit_handler;

+	if (ARM_ABORT_PENDING(exception_index)) {
+		u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+		/*
+		 * HVC/SMC already have an adjusted PC, which we need
+		 * to correct in order to return to after having
+		 * injected the abort.
+		 */
+		if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
+			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+			*vcpu_pc(vcpu) -= adj;
+		}
+
+		kvm_inject_vabt(vcpu);
+		return 1;
+	}
+
+	exception_index = ARM_EXCEPTION_CODE(exception_index);
+
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
@@ -160,6 +152,9 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		exit_handler = kvm_get_exit_handler(vcpu);

 		return exit_handler(vcpu, run);
+	case ARM_EXCEPTION_DATA_ABORT:
+		kvm_inject_vabt(vcpu);
+		return 1;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
 			      exception_index);
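The new ARM_ABORT_PENDING/ARM_EXCEPTION_CODE pair decodes a flag that the hypervisor exit path (hyp_dabt, further down) ORs into the top of the exit code. A stand-alone model of the encode/decode and the HVC/SMC PC rollback; bit 31 comes from the "r0[31] will be set" comment in entry.S, and the exit-code value is a placeholder:

#include <stdint.h>
#include <stdio.h>

#define ARM_EXIT_WITH_ABORT_BIT	31	/* per the entry.S comment: r0[31] */
#define ARM_ABORT_PENDING(x)	((x) & (1u << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_EXCEPTION_CODE(x)	((x) & ~(1u << ARM_EXIT_WITH_ABORT_BIT))

int main(void)
{
	uint32_t exception_index = 5;		/* placeholder exit code */
	uint32_t pc = 0x8004;			/* guest PC after a 32-bit HVC */

	/* hyp_dabt: an abort was caught during guest exit */
	exception_index |= 1u << ARM_EXIT_WITH_ABORT_BIT;

	/* handle_exit: deliver an SError, rewinding the adjusted PC */
	if (ARM_ABORT_PENDING(exception_index)) {
		pc -= 4;			/* 4 for ARM, 2 for Thumb HVC */
		printf("inject SError, resume at %#x (code %u)\n",
		       pc, ARM_EXCEPTION_CODE(exception_index));
	}
	return 0;
}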
@@ -5,6 +5,7 @@
 KVM=../../../../virt/kvm

 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o

 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
@@ -18,6 +18,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>

 	.arch_extension virt

@@ -63,6 +64,36 @@ ENTRY(__guest_exit)
 	ldr	lr, [r0, #4]

 	mov	r0, r1
+	mrs	r1, SPSR
+	mrs	r2, ELR_hyp
+	mrc	p15, 4, r3, c5, c2, 0	@ HSR
+
+	/*
+	 * Force loads and stores to complete before unmasking aborts
+	 * and forcing the delivery of the exception. This gives us a
+	 * single instruction window, which the handler will try to
+	 * match.
+	 */
+	dsb	sy
+	cpsie	a
+
+	.global	abort_guest_exit_start
+abort_guest_exit_start:
+
+	isb
+
+	.global	abort_guest_exit_end
+abort_guest_exit_end:
+
+	/*
+	 * If we took an abort, r0[31] will be set, and cmp will set
+	 * the N bit in PSTATE.
+	 */
+	cmp	r0, #0
+	msrmi	SPSR_cxsf, r1
+	msrmi	ELR_hyp, r2
+	mcrmi	p15, 4, r3, c5, c2, 0	@ HSR
+
 	bx	lr
 ENDPROC(__guest_exit)
@@ -81,7 +81,6 @@ __kvm_hyp_vector:
 	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
 	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
 	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
-	invalid_vector	hyp_dabt	ARM_EXCEPTION_DATA_ABORT
 	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ

 ENTRY(__hyp_do_panic)
@@ -164,6 +163,21 @@ hyp_irq:
 	load_vcpu r0			@ Load VCPU pointer to r0
 	b	__guest_exit

+hyp_dabt:
+	push	{r0, r1}
+	mrs	r0, ELR_hyp
+	ldr	r1, =abort_guest_exit_start
+THUMB(	add	r1, r1, #1)
+	cmp	r0, r1
+	ldrne	r1, =abort_guest_exit_end
+THUMB(	addne	r1, r1, #1)
+	cmpne	r0, r1
+	pop	{r0, r1}
+	bne	__hyp_panic
+
+	orr	r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
+	eret
+
 	.ltorg

 	.popsection
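In C-like terms, hyp_dabt asks: did this data abort land exactly inside the one-instruction window opened in __guest_exit? A rough model of that decision (the addresses are invented; the Thumb +1 in the assembly accounts for the Thumb state bit in symbol addresses):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ARM_EXIT_WITH_ABORT_BIT	31

/* Window labels exported by __guest_exit; values invented for the model. */
static const uint32_t abort_guest_exit_start = 0xc0100040;
static const uint32_t abort_guest_exit_end   = 0xc0100044;

/* Model of hyp_dabt: only aborts taken inside the window are expected. */
static uint32_t hyp_dabt(uint32_t elr, uint32_t exit_code)
{
	if (elr != abort_guest_exit_start && elr != abort_guest_exit_end) {
		fprintf(stderr, "HYP panic: unexpected data abort\n");
		exit(1);
	}
	return exit_code | (1u << ARM_EXIT_WITH_ABORT_BIT);
}

int main(void)
{
	uint32_t code = hyp_dabt(abort_guest_exit_start, 5 /* placeholder */);

	printf("exit code with abort flag: %#010x\n", code);
	return 0;
}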
@@ -14,6 +14,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/jump_label.h>

 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
@@ -54,6 +55,15 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 {
 	u32 val;

+	/*
+	 * If we pended a virtual abort, preserve it until it gets
+	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
+	 * but the crucial bit is the zeroing of HCR.VA in the
+	 * pseudocode.
+	 */
+	if (vcpu->arch.hcr & HCR_VA)
+		vcpu->arch.hcr = read_sysreg(HCR);
+
 	write_sysreg(0, HCR);
 	write_sysreg(0, HSTR);
 	val = read_sysreg(HDCR);
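kvm_inject_vabt() (earlier in this series) pends the SError by setting HCR.VA in the vcpu's shadow HCR; the hunk above makes the shadow track the live register so the flag survives only until the guest actually takes the abort. A user-space model of that round trip (HCR.VA is architecturally bit 8; the "hardware" register is simulated):

#include <stdint.h>
#include <stdio.h>

#define HCR_VA	(1u << 8)	/* virtual abort pending (HCR bit 8) */

static uint32_t hw_hcr;		/* stands in for the HCR system register */

/* kvm_inject_vabt(): pend an SError in the shadow copy. */
static void inject_vabt(uint32_t *shadow)
{
	*shadow |= HCR_VA;
}

/* __deactivate_traps(): if VA was pended, refresh the shadow from the
 * live register, so VA stays set only until the guest has taken it. */
static void deactivate_traps(uint32_t *shadow)
{
	if (*shadow & HCR_VA)
		*shadow = hw_hcr;	/* read_sysreg(HCR) */
	hw_hcr = 0;			/* write_sysreg(0, HCR) */
}

int main(void)
{
	uint32_t shadow = 0;

	inject_vabt(&shadow);
	hw_hcr = shadow;		/* world switch loads shadow into HCR */
	hw_hcr &= ~HCR_VA;		/* guest takes the abort: HW clears VA */
	deactivate_traps(&shadow);
	printf("VA still pending: %s\n", (shadow & HCR_VA) ? "yes" : "no");
	return 0;
}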
@@ -74,14 +84,21 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 	write_sysreg(read_sysreg(MIDR), VPIDR);
 }

+
 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
-	__vgic_v2_save_state(vcpu);
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		__vgic_v3_save_state(vcpu);
+	else
+		__vgic_v2_save_state(vcpu);
 }

 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-	__vgic_v2_restore_state(vcpu);
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		__vgic_v3_restore_state(vcpu);
+	else
+		__vgic_v2_restore_state(vcpu);
 }

 static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
@@ -134,7 +151,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }

-static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -191,8 +208,6 @@ again:
 	return exit_code;
 }

-__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
 static const char * const __hyp_panic_string[] = {
 	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
 	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
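static_branch_unlikely() patches a branch at runtime, so the common GICv2 path costs nothing until a GICv3 CPU interface is detected. The selection logic itself is just the following, modeled here with a plain flag instead of a static key (the vgic calls are stubs):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kvm_vgic_global_state.gicv3_cpuif; a real static key
 * would patch the branch instead of testing a variable. */
static bool gicv3_cpuif;

static void vgic_v2_save_state(void) { puts("save GICv2 state"); }
static void vgic_v3_save_state(void) { puts("save GICv3 state"); }

/* Mirrors __vgic_save_state(): pick the CPU-interface flavour on each
 * world switch. */
static void vgic_save_state(void)
{
	if (gicv3_cpuif)
		vgic_v3_save_state();
	else
		vgic_v2_save_state();
}

int main(void)
{
	vgic_save_state();	/* GICv2 by default */
	gicv3_cpuif = true;	/* probed a GICv3 CPU interface */
	vgic_save_state();
	return 0;
}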
@@ -34,7 +34,7 @@
  * As v7 does not support flushing per IPA, just nuke the whole TLB
  * instead, ignoring the ipa value.
  */
-static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 {
 	dsb(ishst);

@@ -50,21 +50,14 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
 	write_sysreg(0, VTTBR);
 }

-__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
-
-static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	__tlb_flush_vmid(kvm);
+	__kvm_tlb_flush_vmid(kvm);
 }

-__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
-						phys_addr_t ipa);
-
-static void __hyp_text __tlb_flush_vm_context(void)
+void __hyp_text __kvm_flush_vm_context(void)
 {
 	write_sysreg(0, TLBIALLNSNHIS);
 	write_sysreg(0, ICIALLUIS);
 	dsb(ish);
 }
-
-__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
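The dropped __alias() wrappers used GCC's alias attribute to give each hyp-text function a second linker-visible name; defining the functions directly under their external names makes that indirection unnecessary. For reference, a minimal example of the mechanism, with invented names:

#include <stdio.h>

/* foo() gets a second symbol name via GCC's alias attribute, roughly
 * what the removed __alias(...) macro expanded to. */
static void foo(void)
{
	puts("flush");
}

void foo_alias(void) __attribute__((alias("foo")));

int main(void)
{
	foo_alias();	/* same code as foo(), different symbol */
	return 0;
}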
@@ -126,12 +126,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 	int access_size;
 	bool sign_extend;

-	if (kvm_vcpu_dabt_isextabt(vcpu)) {
-		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-		return 1;
-	}
-
 	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
 		/* page table accesses IO mem: tell guest to fix its TTBR */
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
@@ -744,7 +744,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!pgd)
 		return -ENOMEM;

-	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
 }
@@ -936,7 +935,6 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
 		pte = mmu_memory_cache_alloc(cache);
-		kvm_clean_pte(pte);
 		pmd_populate_kernel(NULL, pmd, pte);
 		get_page(virt_to_page(pmd));
 	}
@@ -1434,6 +1432,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int ret, idx;

 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+	if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
+		kvm_inject_vabt(vcpu);
+		return 1;
+	}
+
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),