// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"
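
/*
 * kvm_mmio_write_buf -- narrow 'data' to a 1, 2, 4 or 8 byte quantity
 * and copy its memory representation into 'buf'. Going through a
 * correctly-sized union member ensures exactly 'len' bytes are written.
 */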
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte = data;
		datap = &tmp.byte;
		break;
	case 2:
		tmp.hword = data;
		datap = &tmp.hword;
		break;
	case 4:
		tmp.word = data;
		datap = &tmp.word;
		break;
	case 8:
		tmp.dword = data;
		datap = &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}
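
/*
 * kvm_mmio_read_buf -- read a 1, 2, 4 or 8 byte value from 'buf' and
 * zero-extend it to an unsigned long. The wider accesses go through
 * memcpy() so the byte-aligned buffer is never accessed unaligned.
 */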
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *                           or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/* Detect an already handled MMIO return */
	if (unlikely(!vcpu->mmio_needed))
		return 0;

	vcpu->mmio_needed = 0;
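
	/* Only loads need completion: write the data back to the register. */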
	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
		struct kvm_run *run = vcpu->run;

		len = kvm_vcpu_dabt_get_as(vcpu);
		data = kvm_mmio_read_buf(run->mmio.data, len);
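
		/*
		 * Sign-extend narrow loads when the instruction asks for
		 * it: XORing with the sign-bit mask and subtracting the
		 * mask propagates bit (len * 8 - 1) upwards, e.g. a
		 * 1-byte load of 0x80 becomes 0xffffffffffffff80.
		 */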
		if (kvm_vcpu_dabt_issext(vcpu) &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}
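
		/* A 32-bit (Wn) destination register takes only the low word. */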
		if (!kvm_vcpu_dabt_issf(vcpu))
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_incr_pc(vcpu);

	return 0;
}
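
/*
 * io_mem_abort -- handle a guest data abort that hit no memslot and is
 * therefore (most likely) an MMIO access. Returns 1 to resume the
 * guest, 0 to exit to userspace, or a negative error code.
 */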
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	struct kvm_run *run = vcpu->run;
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];

	/*
	 * No valid syndrome? Ask userspace for help if it has
	 * volunteered to do so, and bail out otherwise.
	 *
	 * In the protected VM case, there isn't much userspace can do
	 * though, so directly deliver an exception to the guest.
	 */
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		if (is_protected_kvm_enabled() &&
		    kvm_vm_is_protected(vcpu->kvm)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			return 1;
		}

		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			     &vcpu->kvm->arch.flags)) {
			run->exit_reason = KVM_EXIT_ARM_NISV;
			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}

		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
		return -ENOSYS;
	}

	/*
	 * Prepare the MMIO operation. First decode the syndrome data we
	 * get from the CPU. Then check whether some in-kernel emulation
	 * feels responsible, otherwise let user space do its magic.
	 */
	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	len = kvm_vcpu_dabt_get_as(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Now prepare kvm_run for the potential return to userland. */
	run->mmio.is_write = is_write;
	run->mmio.phys_addr = fault_ipa;
	run->mmio.len = len;
	vcpu->mmio_needed = 1;
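
	/*
	 * A zero return from kvm_io_bus_{read,write}() means an in-kernel
	 * device (e.g. the VGIC) claimed the access; anything else means
	 * userspace must emulate it.
	 */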
	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu);
		return 1;
	}

	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}