Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm into next
Pull KVM updates from Paolo Bonzini:
 "At over 200 commits, covering almost all supported architectures, this
  was a pretty active cycle for KVM. Changes include:

   - a lot of s390 changes: optimizations, support for migration, GDB
     support and more

   - ARM changes are pretty small: support for the PSCI 0.2 hypercall
     interface on both the guest and the host (the latter acked by
     Catalin)

   - initial POWER8 and little-endian host support

   - support for running u-boot on embedded POWER targets

   - pretty large changes to MIPS too, completing the userspace
     interface and improving the handling of virtualized timer hardware

   - for x86, a larger set of changes is scheduled for 3.17. Still, we
     have a few emulator bugfixes and support for running nested
     fully-virtualized Xen guests (para-virtualized Xen guests have
     always worked). And some optimizations too.

  The only missing architecture here is ia64. It's not a coincidence
  that support for KVM on ia64 is scheduled for removal in 3.17"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (203 commits)
  KVM: add missing cleanup_srcu_struct
  KVM: PPC: Book3S PR: Rework SLB switching code
  KVM: PPC: Book3S PR: Use SLB entry 0
  KVM: PPC: Book3S HV: Fix machine check delivery to guest
  KVM: PPC: Book3S HV: Work around POWER8 performance monitor bugs
  KVM: PPC: Book3S HV: Make sure we don't miss dirty pages
  KVM: PPC: Book3S HV: Fix dirty map for hugepages
  KVM: PPC: Book3S HV: Put huge-page HPTEs in rmap chain for base address
  KVM: PPC: Book3S HV: Fix check for running inside guest in global_invalidates()
  KVM: PPC: Book3S: Move KVM_REG_PPC_WORT to an unused register number
  KVM: PPC: Book3S: Add ONE_REG register names that were missed
  KVM: PPC: Add CAP to indicate hcall fixes
  KVM: PPC: MPIC: Reset IRQ source private members
  KVM: PPC: Graciously fail broken LE hypercalls
  PPC: ePAPR: Fix hypercall on LE guest
  KVM: PPC: BOOK3S: Remove open coded make_dsisr in alignment handler
  KVM: PPC: BOOK3S: Always use the saved DAR value
  PPC: KVM: Make NX bit available with magic page
  KVM: PPC: Disable NX for old magic page using guests
  KVM: PPC: BOOK3S: HV: Add mixed page-size support for guest
  ...
@@ -1756,14 +1756,14 @@ config KVM_GUEST
    help
      Select this option if building a guest kernel for KVM (Trap & Emulate) mode

config KVM_HOST_FREQ
    int "KVM Host Processor Frequency (MHz)"
config KVM_GUEST_TIMER_FREQ
    int "Count/Compare Timer Frequency (MHz)"
    depends on KVM_GUEST
    default 500
    default 100
    help
      Select this option if building a guest kernel for KVM to skip
      RTC emulation when determining guest CPU Frequency. Instead, the guest
      processor frequency is automatically derived from the host frequency.
      Set this to non-zero if building a guest kernel for KVM to skip RTC
      emulation when determining guest CPU Frequency. Instead, the guest's
      timer frequency is specified directly.

choice
    prompt "Kernel page size"
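As a usage sketch (illustrative values only; the option names and the 100 MHz
default come from the Kconfig entries above), a guest kernel built for KVM
Trap & Emulate mode would end up with a .config fragment like:

    CONFIG_KVM_GUEST=y
    CONFIG_KVM_GUEST_TIMER_FREQ=100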
@@ -19,6 +19,38 @@
#include <linux/threads.h>
#include <linux/spinlock.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX      MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0   MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1   MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT    MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL  MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK   MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN  MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED      MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA     MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR   MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT      MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI    MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE    MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS     MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE      MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC        MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_EBASE      MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG     MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1    MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2    MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3    MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG7    MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT   MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC   MIPS_CP0_64(30, 0)


#define KVM_MAX_VCPUS 1
#define KVM_USER_MEM_SLOTS 8
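The register-id encoding above is easy to check by hand. A minimal standalone
sketch (assuming only the KVM_REG_MIPS and KVM_REG_SIZE_U32 base constants
from the uapi <linux/kvm.h>) that recomputes the id for CP0_Status, which is
register 12, select 0:

    #include <stdio.h>
    #include <linux/kvm.h>

    int main(void)
    {
        /* KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * rd + sel) */
        unsigned long long id = KVM_REG_MIPS | KVM_REG_SIZE_U32 |
                                0x10000 | (8 * 12 + 0);
        /* expected: 0x7020000000010060 */
        printf("KVM_REG_MIPS_CP0_STATUS = %#llx\n", id);
        return 0;
    }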
@@ -372,8 +404,19 @@ struct kvm_vcpu_arch {

    u32 io_gpr;     /* GPR used as IO source/target */

    /* Used to calibrate the virtual count register for the guest */
    int32_t host_cp0_count;
    struct hrtimer comparecount_timer;
    /* Count timer control KVM register */
    uint32_t count_ctl;
    /* Count bias from the raw time */
    uint32_t count_bias;
    /* Frequency of timer in Hz */
    uint32_t count_hz;
    /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
    s64 count_dyn_bias;
    /* Resume time */
    ktime_t count_resume;
    /* Period of timer tick in ns */
    u64 count_period;

    /* Bitmask of exceptions that are pending */
    unsigned long pending_exceptions;
@@ -394,8 +437,6 @@ struct kvm_vcpu_arch {
    uint32_t guest_kernel_asid[NR_CPUS];
    struct mm_struct guest_kernel_mm, guest_user_mm;

    struct hrtimer comparecount_timer;

    int last_sched_cpu;

    /* WAIT executed */
@@ -410,6 +451,7 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
@@ -449,15 +491,74 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))

/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
                                                unsigned long val)
{
    unsigned long temp;
    do {
        __asm__ __volatile__(
        "   .set    mips3               \n"
        "   " __LL "%0, %1              \n"
        "   or      %0, %2              \n"
        "   " __SC  "%0, %1             \n"
        "   .set    mips0               \n"
        : "=&r" (temp), "+m" (*reg)
        : "r" (val));
    } while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
                                                  unsigned long val)
{
    unsigned long temp;
    do {
        __asm__ __volatile__(
        "   .set    mips3               \n"
        "   " __LL "%0, %1              \n"
        "   and     %0, %2              \n"
        "   " __SC  "%0, %1             \n"
        "   .set    mips0               \n"
        : "=&r" (temp), "+m" (*reg)
        : "r" (~val));
    } while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
                                                   unsigned long change,
                                                   unsigned long val)
{
    unsigned long temp;
    do {
        __asm__ __volatile__(
        "   .set    mips3               \n"
        "   " __LL "%0, %1              \n"
        "   and     %0, %2              \n"
        "   or      %0, %3              \n"
        "   " __SC  "%0, %1             \n"
        "   .set    mips0               \n"
        : "=&r" (temp), "+m" (*reg)
        : "r" (~change), "r" (val & change));
    } while (unlikely(!temp));
}
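For readers less familiar with MIPS LL/SC, a plain-C equivalent of the
read-modify-write performed by _kvm_atomic_change_c0_guest_reg() may help.
This sketch is illustrative only and deliberately not atomic; in the real
helper the load is LL, the store is SC, and the do/while retries until the
SC succeeds:

    static inline void change_reg_sketch(unsigned long *reg,
                                         unsigned long change,
                                         unsigned long val)
    {
        unsigned long temp = *reg;   /* LL: load linked */
        temp &= ~change;             /* and: clear the bits selected by change */
        temp |= (val & change);      /* or: install the new values of those bits */
        *reg = temp;                 /* SC: store conditional, retried on failure */
    }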
#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val) \
    _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val) \
    _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val) \
{ \
    kvm_clear_c0_guest_cause(cop0, change); \
    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
}
    _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
                                    change, val)

#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
#define kvm_change_c0_guest_ebase(cop0, change, val) \
@@ -468,29 +569,33 @@ struct kvm_vcpu_arch {

struct kvm_mips_callbacks {
    int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
    int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
    int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
    int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
    int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
    int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
    int (*handle_syscall) (struct kvm_vcpu *vcpu);
    int (*handle_res_inst) (struct kvm_vcpu *vcpu);
    int (*handle_break) (struct kvm_vcpu *vcpu);
    int (*vm_init) (struct kvm *kvm);
    int (*vcpu_init) (struct kvm_vcpu *vcpu);
    int (*vcpu_setup) (struct kvm_vcpu *vcpu);
    gpa_t(*gva_to_gpa) (gva_t gva);
    void (*queue_timer_int) (struct kvm_vcpu *vcpu);
    void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
    void (*queue_io_int) (struct kvm_vcpu *vcpu,
                          struct kvm_mips_interrupt *irq);
    void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
                            struct kvm_mips_interrupt *irq);
    int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
                        uint32_t cause);
    int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
                      uint32_t cause);
    int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
    int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
    int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
    int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
    int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
    int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
    int (*handle_syscall)(struct kvm_vcpu *vcpu);
    int (*handle_res_inst)(struct kvm_vcpu *vcpu);
    int (*handle_break)(struct kvm_vcpu *vcpu);
    int (*vm_init)(struct kvm *kvm);
    int (*vcpu_init)(struct kvm_vcpu *vcpu);
    int (*vcpu_setup)(struct kvm_vcpu *vcpu);
    gpa_t (*gva_to_gpa)(gva_t gva);
    void (*queue_timer_int)(struct kvm_vcpu *vcpu);
    void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
    void (*queue_io_int)(struct kvm_vcpu *vcpu,
                         struct kvm_mips_interrupt *irq);
    void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
                           struct kvm_mips_interrupt *irq);
    int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
                       uint32_t cause);
    int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
                     uint32_t cause);
    int (*get_one_reg)(struct kvm_vcpu *vcpu,
                       const struct kvm_one_reg *reg, s64 *v);
    int (*set_one_reg)(struct kvm_vcpu *vcpu,
                       const struct kvm_one_reg *reg, s64 v);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -609,7 +714,16 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                                                         struct kvm_run *run);

enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_check_privilege(unsigned long cause,
                                               uint32_t *opc,
@@ -646,7 +760,6 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
                               struct kvm_vcpu *vcpu);

/* Misc */
extern void mips32_SyncICache(unsigned long addr, unsigned long size);
extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
@@ -106,6 +106,41 @@ struct kvm_fpu {
#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)

/* KVM specific control registers */

/*
 * CP0_Count control
 * DC: Set 0: Master disable CP0_Count and set COUNT_RESUME to now
 *     Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
 *            interrupts since COUNT_RESUME
 *     This can be used to freeze the timer to get a consistent snapshot of
 *     the CP0_Count and timer interrupt pending state, while also resuming
 *     safely without losing time or guest timer interrupts.
 * Other: Reserved, do not change.
 */
#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
                                0x20000 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001

/*
 * CP0_Count resume monotonic nanoseconds
 * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
 * disable). Any reads and writes of Count related registers while
 * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
 * cleared again (master enable) any timer interrupts since this time will be
 * emulated.
 * Modifications to times in the future are rejected.
 */
#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
                                   0x20000 | 1)
/*
 * CP0_Count rate in Hz
 * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
 * discontinuities in CP0_Count.
 */
#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
                               0x20000 | 2)
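These ids are meant to be used with the generic KVM_GET_ONE_REG and
KVM_SET_ONE_REG vcpu ioctls. A minimal userspace sketch (the helper name is
hypothetical; it assumes an already-created vcpu fd and a uapi header carrying
the defines above) that freezes CP0_Count for a consistent snapshot:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int freeze_guest_count(int vcpu_fd)
    {
        __u64 ctl = KVM_REG_MIPS_COUNT_CTL_DC;  /* master disable CP0_Count */
        struct kvm_one_reg reg = {
            .id   = KVM_REG_MIPS_COUNT_CTL,
            .addr = (__u64)(unsigned long)&ctl,
        };
        /* CP0_Count and pending-interrupt state stay stable until DC clears */
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

Clearing the bit again with the same ioctl resumes the timer, and per the
comment above any timer interrupts accrued since COUNT_RESUME are emulated.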
/*
 * KVM MIPS specific structures and definitions
 *

@@ -611,35 +611,3 @@ MIPSX(exceptions):
    .word _C_LABEL(MIPSX(GuestException)) # 29
    .word _C_LABEL(MIPSX(GuestException)) # 30
    .word _C_LABEL(MIPSX(GuestException)) # 31


/* This routine makes changes to the instruction stream effective to the hardware.
 * It should be called after the instruction stream is written.
 * On return, the new instructions are effective.
 * Inputs:
 * a0 = Start address of new instruction stream
 * a1 = Size, in bytes, of new instruction stream
 */

#define HW_SYNCI_Step $1
LEAF(MIPSX(SyncICache))
    .set    push
    .set    mips32r2
    beq     a1, zero, 20f
    nop
    REG_ADDU a1, a0, a1
    rdhwr   v0, HW_SYNCI_Step
    beq     v0, zero, 20f
    nop
10:
    synci   0(a0)
    REG_ADDU a0, a0, v0
    sltu    v1, a0, a1
    bne     v1, zero, 10b
    nop
    sync
20:
    jr.hb   ra
    nop
    .set    pop
END(MIPSX(SyncICache))
@@ -61,11 +61,6 @@ static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
    return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
    return gfn;
}

/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
 * are "runnable" if interrupts are pending
 */
@@ -130,8 +125,8 @@ static void kvm_mips_init_vm_percpu(void *arg)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
    if (atomic_inc_return(&kvm_mips_instance) == 1) {
        kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
                 __func__);
        kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
                  __func__);
        on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
    }

@@ -149,9 +144,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
        if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
            kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
    }

    if (kvm->arch.guest_pmap)
        kfree(kvm->arch.guest_pmap);
    kfree(kvm->arch.guest_pmap);

    kvm_for_each_vcpu(i, vcpu, kvm) {
        kvm_arch_vcpu_free(vcpu);
@@ -186,8 +179,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)

    /* If this is the last instance, restore wired count */
    if (atomic_dec_return(&kvm_mips_instance) == 0) {
        kvm_info("%s: last KVM instance, restoring TLB parameters\n",
                 __func__);
        kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
                  __func__);
        on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
    }
}
@@ -249,9 +242,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
        goto out;
    }

    kvm_info
        ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
         npages, kvm->arch.guest_pmap);
    kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
              npages, kvm->arch.guest_pmap);

    /* Now setup the page table */
    for (i = 0; i < npages; i++) {
@@ -296,7 +288,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
    if (err)
        goto out_free_cpu;

    kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
    kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

    /* Allocate space for host mode exception handlers that handle
     * guest mode exits
@@ -304,7 +296,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
    if (cpu_has_veic || cpu_has_vint) {
        size = 0x200 + VECTORSPACING * 64;
    } else {
        size = 0x200;
        size = 0x4000;
    }

    /* Save Linux EBASE */
@@ -316,8 +308,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        err = -ENOMEM;
        goto out_free_cpu;
    }
    kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
             ALIGN(size, PAGE_SIZE), gebase);
    kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
              ALIGN(size, PAGE_SIZE), gebase);

    /* Save new ebase */
    vcpu->arch.guest_ebase = gebase;
@@ -342,15 +334,16 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)

    /* General handler, relocate to unmapped space for sanity's sake */
    offset = 0x2000;
    kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
             gebase + offset,
             mips32_GuestExceptionEnd - mips32_GuestException);
    kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
              gebase + offset,
              mips32_GuestExceptionEnd - mips32_GuestException);

    memcpy(gebase + offset, mips32_GuestException,
           mips32_GuestExceptionEnd - mips32_GuestException);

    /* Invalidate the icache for these ranges */
    mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
    local_flush_icache_range((unsigned long)gebase,
                             (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

    /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
    vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
@@ -360,14 +353,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        goto out_free_gebase;
    }

    kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
    kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
    kvm_mips_commpage_init(vcpu);

    /* Init */
    vcpu->arch.last_sched_cpu = -1;

    /* Start off the timer */
    kvm_mips_emulate_count(vcpu);
    kvm_mips_init_count(vcpu);

    return vcpu;
@@ -389,12 +382,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)

    kvm_mips_dump_stats(vcpu);

    if (vcpu->arch.guest_ebase)
        kfree(vcpu->arch.guest_ebase);

    if (vcpu->arch.kseg0_commpage)
        kfree(vcpu->arch.kseg0_commpage);

    kfree(vcpu->arch.guest_ebase);
    kfree(vcpu->arch.kseg0_commpage);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -423,11 +412,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        vcpu->mmio_needed = 0;
    }

    local_irq_disable();
    /* Check if we have any exceptions/interrupts pending */
    kvm_mips_deliver_interrupts(vcpu,
                                kvm_read_c0_guest_cause(vcpu->arch.cop0));

    local_irq_disable();
    kvm_guest_enter();

    r = __kvm_mips_vcpu_run(run, vcpu);
@@ -490,36 +479,6 @@ kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
    return -ENOIOCTLCMD;
}

#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX      MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0   MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1   MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT    MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL  MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK   MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN  MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED      MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA     MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR   MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT      MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI    MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE    MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS     MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE      MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EBASE      MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG     MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1    MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2    MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3    MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG7    MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT   MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC   MIPS_CP0_64(30, 0)

static u64 kvm_mips_get_one_regs[] = {
    KVM_REG_MIPS_R0,
    KVM_REG_MIPS_R1,
@@ -560,25 +519,34 @@ static u64 kvm_mips_get_one_regs[] = {

    KVM_REG_MIPS_CP0_INDEX,
    KVM_REG_MIPS_CP0_CONTEXT,
    KVM_REG_MIPS_CP0_USERLOCAL,
    KVM_REG_MIPS_CP0_PAGEMASK,
    KVM_REG_MIPS_CP0_WIRED,
    KVM_REG_MIPS_CP0_HWRENA,
    KVM_REG_MIPS_CP0_BADVADDR,
    KVM_REG_MIPS_CP0_COUNT,
    KVM_REG_MIPS_CP0_ENTRYHI,
    KVM_REG_MIPS_CP0_COMPARE,
    KVM_REG_MIPS_CP0_STATUS,
    KVM_REG_MIPS_CP0_CAUSE,
    /* EPC set via kvm_regs, et al. */
    KVM_REG_MIPS_CP0_EPC,
    KVM_REG_MIPS_CP0_CONFIG,
    KVM_REG_MIPS_CP0_CONFIG1,
    KVM_REG_MIPS_CP0_CONFIG2,
    KVM_REG_MIPS_CP0_CONFIG3,
    KVM_REG_MIPS_CP0_CONFIG7,
    KVM_REG_MIPS_CP0_ERROREPC
    KVM_REG_MIPS_CP0_ERROREPC,

    KVM_REG_MIPS_COUNT_CTL,
    KVM_REG_MIPS_COUNT_RESUME,
    KVM_REG_MIPS_COUNT_HZ,
};
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    int ret;
    s64 v;

    switch (reg->id) {
@@ -601,24 +569,36 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_CONTEXT:
        v = (long)kvm_read_c0_guest_context(cop0);
        break;
    case KVM_REG_MIPS_CP0_USERLOCAL:
        v = (long)kvm_read_c0_guest_userlocal(cop0);
        break;
    case KVM_REG_MIPS_CP0_PAGEMASK:
        v = (long)kvm_read_c0_guest_pagemask(cop0);
        break;
    case KVM_REG_MIPS_CP0_WIRED:
        v = (long)kvm_read_c0_guest_wired(cop0);
        break;
    case KVM_REG_MIPS_CP0_HWRENA:
        v = (long)kvm_read_c0_guest_hwrena(cop0);
        break;
    case KVM_REG_MIPS_CP0_BADVADDR:
        v = (long)kvm_read_c0_guest_badvaddr(cop0);
        break;
    case KVM_REG_MIPS_CP0_ENTRYHI:
        v = (long)kvm_read_c0_guest_entryhi(cop0);
        break;
    case KVM_REG_MIPS_CP0_COMPARE:
        v = (long)kvm_read_c0_guest_compare(cop0);
        break;
    case KVM_REG_MIPS_CP0_STATUS:
        v = (long)kvm_read_c0_guest_status(cop0);
        break;
    case KVM_REG_MIPS_CP0_CAUSE:
        v = (long)kvm_read_c0_guest_cause(cop0);
        break;
    case KVM_REG_MIPS_CP0_EPC:
        v = (long)kvm_read_c0_guest_epc(cop0);
        break;
    case KVM_REG_MIPS_CP0_ERROREPC:
        v = (long)kvm_read_c0_guest_errorepc(cop0);
        break;
@@ -637,6 +617,15 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_CONFIG7:
        v = (long)kvm_read_c0_guest_config7(cop0);
        break;
    /* registers to be handled specially */
    case KVM_REG_MIPS_CP0_COUNT:
    case KVM_REG_MIPS_COUNT_CTL:
    case KVM_REG_MIPS_COUNT_RESUME:
    case KVM_REG_MIPS_COUNT_HZ:
        ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
        if (ret)
            return ret;
        break;
    default:
        return -EINVAL;
    }
@@ -697,12 +686,18 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_CONTEXT:
        kvm_write_c0_guest_context(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_USERLOCAL:
        kvm_write_c0_guest_userlocal(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_PAGEMASK:
        kvm_write_c0_guest_pagemask(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_WIRED:
        kvm_write_c0_guest_wired(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_HWRENA:
        kvm_write_c0_guest_hwrena(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_BADVADDR:
        kvm_write_c0_guest_badvaddr(cop0, v);
        break;
@@ -712,12 +707,20 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_STATUS:
        kvm_write_c0_guest_status(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_CAUSE:
        kvm_write_c0_guest_cause(cop0, v);
    case KVM_REG_MIPS_CP0_EPC:
        kvm_write_c0_guest_epc(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_ERROREPC:
        kvm_write_c0_guest_errorepc(cop0, v);
        break;
    /* registers to be handled specially */
    case KVM_REG_MIPS_CP0_COUNT:
    case KVM_REG_MIPS_CP0_COMPARE:
    case KVM_REG_MIPS_CP0_CAUSE:
    case KVM_REG_MIPS_COUNT_CTL:
    case KVM_REG_MIPS_COUNT_RESUME:
    case KVM_REG_MIPS_COUNT_HZ:
        return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
    default:
        return -EINVAL;
    }
@@ -920,7 +923,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
        return -1;

    printk("VCPU Register Dump:\n");
    printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
    printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
    printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

    for (i = 0; i < 32; i += 4) {
@@ -969,7 +972,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    return 0;
}

void kvm_mips_comparecount_func(unsigned long data)
static void kvm_mips_comparecount_func(unsigned long data)
{
    struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

@@ -984,15 +987,13 @@ void kvm_mips_comparecount_func(unsigned long data)
/*
 * low level hrtimer wake routine.
 */
enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
    struct kvm_vcpu *vcpu;

    vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
    kvm_mips_comparecount_func((unsigned long) vcpu);
    hrtimer_forward_now(&vcpu->arch.comparecount_timer,
                        ktime_set(0, MS_TO_NS(10)));
    return HRTIMER_RESTART;
    return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/cacheflush.h>

#include "kvm_mips_comm.h"

@@ -40,7 +41,7 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
        CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                   (vcpu, (unsigned long) opc));
    memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
    mips32_SyncICache(kseg0_opc, 32);
    local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

    return result;
}
@@ -66,7 +67,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
        CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                   (vcpu, (unsigned long) opc));
    memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
    mips32_SyncICache(kseg0_opc, 32);
    local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

    return result;
}
@@ -99,11 +100,12 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
        CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                   (vcpu, (unsigned long) opc));
    memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
    mips32_SyncICache(kseg0_opc, 32);
    local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
    local_irq_save(flags);
    memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
    mips32_SyncICache((unsigned long) opc, 32);
    local_flush_icache_range((unsigned long)opc,
                             (unsigned long)opc + 32);
    local_irq_restore(flags);
} else {
    kvm_err("%s: Invalid address: %p\n", __func__, opc);
@@ -134,11 +136,12 @@ kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
        CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                   (vcpu, (unsigned long) opc));
    memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
    mips32_SyncICache(kseg0_opc, 32);
    local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
    local_irq_save(flags);
    memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
    mips32_SyncICache((unsigned long) opc, 32);
    local_flush_icache_range((unsigned long)opc,
                             (unsigned long)opc + 32);
    local_irq_restore(flags);
} else {
    kvm_err("%s: Invalid address: %p\n", __func__, opc);
@@ -11,6 +11,7 @@

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -228,25 +229,520 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
    return er;
}

/* Every time the compare register is written to, we need to decide when to fire
 * the timer that represents timer ticks to the GUEST.
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu: Virtual CPU.
 *
 * Returns: 1 if the CP0_Count timer is disabled by either the guest
 *          CP0_Cause.DC bit or the count_ctl.DC bit.
 *          0 otherwise (in which case CP0_Count timer is running).
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    enum emulation_result er = EMULATE_DONE;
    return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
           (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

    /* If COUNT is enabled */
    if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
        hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer,
                      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
    } else {
        hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
    s64 now_ns, periods;
    u64 delta;

    now_ns = ktime_to_ns(now);
    delta = now_ns + vcpu->arch.count_dyn_bias;

    if (delta >= vcpu->arch.count_period) {
        /* If delta is out of safe range the bias needs adjusting */
        periods = div64_s64(now_ns, vcpu->arch.count_period);
        vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
        /* Recalculate delta with new bias */
        delta = now_ns + vcpu->arch.count_dyn_bias;
    }

    return er;
    /*
     * We've ensured that:
     *   delta < count_period
     *
     * Therefore the intermediate delta*count_hz will never overflow since
     * at the boundary condition:
     *   delta = count_period
     *   delta = NSEC_PER_SEC * 2^32 / count_hz
     *   delta * count_hz = NSEC_PER_SEC * 2^32
     */
    return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
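To put numbers on the overflow argument in the comment above, a worked
example, assuming the 100 MHz default frequency that kvm_mips_init_count()
establishes later in this hunk:

    /*
     * count_hz     = 100000000 (100 MHz)
     * count_period = NSEC_PER_SEC * 2^32 / count_hz
     *              = 1000000000 * 4294967296 / 100000000
     *              = 42949672960 ns (~42.9 s per CP0_Count wrap)
     *
     * Worst case inside kvm_mips_ktime_to_count():
     *   delta * count_hz < count_period * count_hz
     *                    = NSEC_PER_SEC * 2^32 ~= 4.3e18,
     * which fits in a u64 (max ~1.8e19), so div_u64() is safe.
     */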
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu: Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns: Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
    if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
        return vcpu->arch.count_resume;

    return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu: Virtual CPU.
 * @now:  Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns: The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
    ktime_t expires;
    int running;

    /* Is the hrtimer pending? */
    expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
    if (ktime_compare(now, expires) >= 0) {
        /*
         * Cancel it while we handle it so there's no chance of
         * interference with the timeout handler.
         */
        running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Nothing should be waiting on the timeout */
        kvm_mips_callbacks->queue_timer_int(vcpu);

        /*
         * Restart the timer if it was running based on the expiry time
         * we read, so that we don't push it back 2 periods.
         */
        if (running) {
            expires = ktime_add_ns(expires,
                                   vcpu->arch.count_period);
            hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                          HRTIMER_MODE_ABS);
        }
    }

    /* Return the biased and scaled guest CP0_Count */
    return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu: Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns: The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;

    /* If count disabled just read static copy of count */
    if (kvm_mips_count_disabled(vcpu))
        return kvm_read_c0_guest_count(cop0);

    return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:  Virtual CPU.
 * @count: Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns: The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
                                       uint32_t *count)
{
    ktime_t now;

    /* stop hrtimer before finding time */
    hrtimer_cancel(&vcpu->arch.comparecount_timer);
    now = ktime_get();

    /* find count at this point and handle pending hrtimer */
    *count = kvm_mips_read_count_running(vcpu, now);

    return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:  Virtual CPU.
 * @now:   ktime at point of resume.
 * @count: CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_timer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_timer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, uint32_t count)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    uint32_t compare;
    u64 delta;
    ktime_t expire;

    /* Calculate timeout (wrap 0 to 2^32) */
    compare = kvm_read_c0_guest_compare(cop0);
    delta = (u64)(uint32_t)(compare - count - 1) + 1;
    delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
    expire = ktime_add_ns(now, delta);

    /* Update hrtimer to use new timeout */
    hrtimer_cancel(&vcpu->arch.comparecount_timer);
    hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
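The modular arithmetic in the timeout calculation above deserves a worked
example (the values are illustrative):

    /*
     * compare = 0x00000010, count = 0xfffffff0:
     *   (u64)(uint32_t)(compare - count - 1) + 1
     *     = (u64)0x0000001f + 1 = 0x20 ticks until the next hit.
     *
     * compare == count:
     *   (u64)(uint32_t)(-1) + 1 = 0x100000000, i.e. a full 2^32-tick
     *   period, where a plain 32-bit (compare - count) would give 0
     *   and fire the timer immediately.
     */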
/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu: Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered which do not depend on the time that
 * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
 * kvm_mips_resume_hrtimer() are used directly).
 *
 * It is guaranteed that no timer interrupts will be lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
{
    ktime_t now;
    uint32_t count;

    /*
     * freeze_hrtimer takes care of timer interrupts <= count, and
     * resume_hrtimer takes care of timer interrupts > count.
     */
    now = kvm_mips_freeze_hrtimer(vcpu, &count);
    kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:  Virtual CPU.
 * @count: Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    ktime_t now;

    /* Calculate bias */
    now = kvm_mips_count_time(vcpu);
    vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

    if (kvm_mips_count_disabled(vcpu))
        /* The timer's disabled, adjust the static count */
        kvm_write_c0_guest_count(cop0, count);
    else
        /* Update timeout */
        kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu: Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
    /* 100 MHz */
    vcpu->arch.count_hz = 100*1000*1000;
    vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                                      vcpu->arch.count_hz);
    vcpu->arch.count_dyn_bias = 0;

    /* Starting at 0 */
    kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:     Virtual CPU.
 * @count_hz: Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns: -EINVAL if @count_hz is out of range.
 *          0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    int dc;
    ktime_t now;
    u32 count;

    /* ensure the frequency is in a sensible range... */
    if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
        return -EINVAL;
    /* ... and has actually changed */
    if (vcpu->arch.count_hz == count_hz)
        return 0;

    /* Safely freeze timer so we can keep it continuous */
    dc = kvm_mips_count_disabled(vcpu);
    if (dc) {
        now = kvm_mips_count_time(vcpu);
        count = kvm_read_c0_guest_count(cop0);
    } else {
        now = kvm_mips_freeze_hrtimer(vcpu, &count);
    }

    /* Update the frequency */
    vcpu->arch.count_hz = count_hz;
    vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
    vcpu->arch.count_dyn_bias = 0;

    /* Calculate adjusted bias so dynamic count is unchanged */
    vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

    /* Update and resume hrtimer */
    if (!dc)
        kvm_mips_resume_hrtimer(vcpu, now, count);
    return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:    Virtual CPU.
 * @compare: New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;

    /* if unchanged, must just be an ack */
    if (kvm_read_c0_guest_compare(cop0) == compare)
        return;

    /* Update compare */
    kvm_write_c0_guest_compare(cop0, compare);

    /* Update timeout if count enabled */
    if (!kvm_mips_count_disabled(vcpu))
        kvm_mips_update_hrtimer(vcpu);
}
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu: Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns: The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    uint32_t count;
    ktime_t now;

    /* Stop hrtimer */
    hrtimer_cancel(&vcpu->arch.comparecount_timer);

    /* Set the static count from the dynamic count, handling pending TI */
    now = ktime_get();
    count = kvm_mips_read_count_running(vcpu, now);
    kvm_write_c0_guest_count(cop0, count);

    return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu: Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;

    kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
    if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
        kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu: Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    uint32_t count;

    kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

    /*
     * Set the dynamic count to match the static count.
     * This starts the hrtimer if count_ctl.DC allows it.
     * Otherwise it conveniently updates the biases.
     */
    count = kvm_read_c0_guest_count(cop0);
    kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:      Virtual CPU.
 * @count_ctl: Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns: -EINVAL if reserved bits are set.
 *          0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    s64 changed = count_ctl ^ vcpu->arch.count_ctl;
    s64 delta;
    ktime_t expire, now;
    uint32_t count, compare;

    /* Only allow defined bits to be changed */
    if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
        return -EINVAL;

    /* Apply new value */
    vcpu->arch.count_ctl = count_ctl;

    /* Master CP0_Count disable */
    if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
        /* Is CP0_Cause.DC already disabling CP0_Count? */
        if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
            if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                /* Just record the current time */
                vcpu->arch.count_resume = ktime_get();
        } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
            /* disable timer and record current time */
            vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
        } else {
            /*
             * Calculate timeout relative to static count at resume
             * time (wrap 0 to 2^32).
             */
            count = kvm_read_c0_guest_count(cop0);
            compare = kvm_read_c0_guest_compare(cop0);
            delta = (u64)(uint32_t)(compare - count - 1) + 1;
            delta = div_u64(delta * NSEC_PER_SEC,
                            vcpu->arch.count_hz);
            expire = ktime_add_ns(vcpu->arch.count_resume, delta);

            /* Handle pending interrupt */
            now = ktime_get();
            if (ktime_compare(now, expire) >= 0)
                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

            /* Resume hrtimer without changing bias */
            count = kvm_mips_read_count_running(vcpu, now);
            kvm_mips_resume_hrtimer(vcpu, now, count);
        }
    }

    return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:         Virtual CPU.
 * @count_resume: Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns: -EINVAL if out of valid range (0..now).
 *          0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
    /*
     * It doesn't make sense for the resume time to be in the future, as it
     * would be possible for the next interrupt to be more than a full
     * period in the future.
     */
    if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
        return -EINVAL;

    vcpu->arch.count_resume = ns_to_ktime(count_resume);
    return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu: Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
    /* Add the Count period to the current expiry time */
    hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                           vcpu->arch.count_period);
    return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
@@ -471,8 +967,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
#endif
    /* Get reg */
    if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
        /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
        vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
        vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
    } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
        vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -539,10 +1034,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
    }
    /* Are we writing to COUNT */
    else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
        /* Linux doesn't seem to write into COUNT, we throw an error
         * if we notice a write to COUNT
         */
        /*er = EMULATE_FAIL; */
        kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
        goto done;
    } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
        kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
@@ -552,8 +1044,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
        /* If we are writing to COMPARE */
        /* Clear pending timer interrupt, if any */
        kvm_mips_callbacks->dequeue_timer_int(vcpu);
        kvm_write_c0_guest_compare(cop0,
                                   vcpu->arch.gprs[rt]);
        kvm_mips_write_compare(vcpu,
                               vcpu->arch.gprs[rt]);
    } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
        kvm_write_c0_guest_status(cop0,
                                  vcpu->arch.gprs[rt]);
@@ -564,6 +1056,20 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
        kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
    } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
        uint32_t old_cause, new_cause;
        old_cause = kvm_read_c0_guest_cause(cop0);
        new_cause = vcpu->arch.gprs[rt];
        /* Update R/W bits */
        kvm_change_c0_guest_cause(cop0, 0x08800300,
                                  new_cause);
        /* DC bit enabling/disabling timer? */
        if ((old_cause ^ new_cause) & CAUSEF_DC) {
            if (new_cause & CAUSEF_DC)
                kvm_mips_count_disable_cause(vcpu);
            else
                kvm_mips_count_enable_cause(vcpu);
        }
    } else {
        cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -887,7 +1393,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)

    printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

    mips32_SyncICache(CKSEG0ADDR(pa), 32);
    local_flush_icache_range(CKSEG0ADDR(pa), 32);
    return 0;
}

@@ -1325,8 +1831,12 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DONE;

#ifdef DEBUG
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
    int index;

    /*
     * If address not in the guest TLB, then we are in trouble
     */
@@ -1553,8 +2063,7 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
                   current_cpu_data.icache.linesz);
        break;
    case 2: /* Read count register */
        printk("RDHWR: Cont register\n");
        arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
        arch->gprs[rt] = kvm_mips_read_count(vcpu);
        break;
    case 3: /* Count register resolution */
        switch (current_cpu_data.cputype) {
@@ -1810,11 +2319,9 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
        er = EMULATE_FAIL;
    }
} else {
#ifdef DEBUG
    kvm_debug
        ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
         tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
#endif
    /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
    kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
                                         NULL);
@@ -222,26 +222,19 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 		return -1;
 	}

-	if (idx < 0) {
-		idx = read_c0_random() % current_cpu_data.tlbsize;
-		write_c0_index(idx);
-		mtc0_tlbw_hazard();
-	}
 	write_c0_entrylo0(entrylo0);
 	write_c0_entrylo1(entrylo1);
 	mtc0_tlbw_hazard();

-	tlb_write_indexed();
+	if (idx < 0)
+		tlb_write_random();
+	else
+		tlb_write_indexed();
 	tlbw_use_hazard();

-#ifdef DEBUG
-	if (debug) {
-		kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
-			  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-			  vcpu->arch.pc, idx, read_c0_entryhi(),
-			  read_c0_entrylo0(), read_c0_entrylo1());
-	}
-#endif
+	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+		  vcpu->arch.pc, idx, read_c0_entryhi(),
+		  read_c0_entrylo0(), read_c0_entrylo1());

 	/* Flush D-cache */
 	if (flush_dcache_mask) {
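The rewritten tail of kvm_mips_host_tlb_write() drops the software victim selection (read_c0_random() % tlbsize plus an Index register write and its hazard barrier) and instead lets the TLBWR instruction pick the slot from the hardware Random register when no existing entry matched. A compact sketch of the resulting dispatch; the mips_* calls are hypothetical stubs standing in for the real tlb_write_random()/tlb_write_indexed()/tlbw_use_hazard() macros from the MIPS headers:

/* Hypothetical stubs for the MIPS TLB write primitives. */
void mips_tlb_write_random(void);	/* TLBWR: hardware picks the slot */
void mips_tlb_write_indexed(void);	/* TLBWI: writes the Index slot */
void mips_tlbw_use_hazard(void);	/* barrier after a TLB write */

/* idx < 0 means "no existing host TLB entry matched this VPN/ASID",
 * so let the hardware choose a victim; otherwise overwrite in place. */
static void host_tlb_commit(int idx)
{
	if (idx < 0)
		mips_tlb_write_random();
	else
		mips_tlb_write_indexed();
	mips_tlbw_use_hazard();
}

The design win is one fewer CP0 write (and one fewer hazard barrier) on the miss path, with the replacement policy delegated to the same Random register the old code was sampling by hand.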
@@ -348,11 +341,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	mtc0_tlbw_hazard();
 	tlbw_use_hazard();

-#ifdef DEBUG
 	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
 		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
 		  read_c0_entrylo0(), read_c0_entrylo1());
-#endif

 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
@@ -400,10 +391,8 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

-#ifdef DEBUG
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
-#endif

 	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
 				       tlb->tlb_mask);
@@ -424,10 +413,8 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 		}
 	}

-#ifdef DEBUG
 	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
 		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
-#endif

 	return index;
 }
@@ -461,9 +448,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)

 	local_irq_restore(flags);

-#ifdef DEBUG
 	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-#endif

 	return idx;
 }
@@ -508,12 +493,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)

 	local_irq_restore(flags);

-#ifdef DEBUG
-	if (idx > 0) {
+	if (idx > 0)
 		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
-			  (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
-	}
-#endif
+			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

 	return 0;
 }
@@ -658,15 +640,30 @@ void kvm_local_flush_tlb_all(void)
 	local_irq_restore(flags);
 }

+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu:	Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+		hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	unsigned long flags;
 	int newasid = 0;

-#ifdef DEBUG
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-#endif

 	/* Alocate new kernel and user ASIDs if needed */
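hrtimers are queued per CPU, so a VCPU that gets rescheduled elsewhere would otherwise leave its Count/Compare expiry armed on the old CPU, delaying injection of the guest timer interrupt. Cancelling and re-arming moves the expiry to the CPU that is about to run the guest, and because hrtimer_cancel() only returns true for a timer that was actually pending, a stopped timer stays stopped. A toy model of that idiom; the toy_* types are illustrative, not kernel API:

/* The cancel-then-restart idiom in miniature: t->active mimics what
 * hrtimer_cancel() reports, so a timer that was not running is left
 * alone rather than being spuriously armed on the new CPU. */
struct toy_timer {
	int active;	/* armed? */
	int cpu;	/* CPU whose queue holds the expiry */
};

static int toy_cancel(struct toy_timer *t)
{
	int was_active = t->active;

	t->active = 0;
	return was_active;
}

static void toy_restart_here(struct toy_timer *t, int this_cpu)
{
	t->active = 1;
	t->cpu = this_cpu;	/* expiry now fires on the running CPU */
}

static void migrate_timer(struct toy_timer *t, int this_cpu)
{
	if (toy_cancel(t))	/* only re-arm if it was pending */
		toy_restart_here(t, this_cpu);
}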
@@ -682,17 +679,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			vcpu->arch.guest_user_mm.context.asid[cpu];
 		newasid++;

-		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
-			 cpu_context(cpu, current->mm));
-		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
-		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-			 vcpu->arch.guest_user_asid[cpu]);
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+			  vcpu->arch.guest_user_asid[cpu]);
 	}

 	if (vcpu->arch.last_sched_cpu != cpu) {
-		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
-			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+		/*
+		 * Migrate the timer interrupt to the current CPU so that it
+		 * always interrupts the guest and synchronously triggers a
+		 * guest timer interrupt.
+		 */
+		kvm_mips_migrate_count(vcpu);
 	}

 	if (!newasid) {
@@ -32,9 +32,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
 		gpa = KVM_INVALID_ADDR;
 	}

-#ifdef DEBUG
 	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-#endif

 	return gpa;
 }
@@ -85,11 +83,9 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)

 	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug
 		    ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
 		     cause, opc, badvaddr);
-#endif
 		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

 		if (er == EMULATE_DONE)
@@ -138,11 +134,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug
 		    ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
 		     cause, opc, badvaddr);
-#endif
 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
@@ -188,10 +182,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 		}
 	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
 		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-#ifdef DEBUG
 		kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
 			  vcpu->arch.pc, badvaddr);
-#endif

 		/* User Address (UA) fault, this could happen if
 		 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
@@ -236,9 +228,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)

 	if (KVM_GUEST_KERNEL_MODE(vcpu)
 	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-#ifdef DEBUG
 		kvm_debug("Emulate Store to MMIO space\n");
-#endif
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
 			printk("Emulate Store to MMIO space failed\n");
@@ -268,9 +258,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 	int ret = RESUME_GUEST;

 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-#ifdef DEBUG
 		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
-#endif
 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
 		if (er == EMULATE_FAIL) {
 			printk("Emulate Load from MMIO space failed\n");
@@ -401,6 +389,78 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }

+static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
+				     const struct kvm_one_reg *reg,
+				     s64 *v)
+{
+	switch (reg->id) {
+	case KVM_REG_MIPS_CP0_COUNT:
+		*v = kvm_mips_read_count(vcpu);
+		break;
+	case KVM_REG_MIPS_COUNT_CTL:
+		*v = vcpu->arch.count_ctl;
+		break;
+	case KVM_REG_MIPS_COUNT_RESUME:
+		*v = ktime_to_ns(vcpu->arch.count_resume);
+		break;
+	case KVM_REG_MIPS_COUNT_HZ:
+		*v = vcpu->arch.count_hz;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+				     const struct kvm_one_reg *reg,
+				     s64 v)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int ret = 0;
+
+	switch (reg->id) {
+	case KVM_REG_MIPS_CP0_COUNT:
+		kvm_mips_write_count(vcpu, v);
+		break;
+	case KVM_REG_MIPS_CP0_COMPARE:
+		kvm_mips_write_compare(vcpu, v);
+		break;
+	case KVM_REG_MIPS_CP0_CAUSE:
+		/*
+		 * If the timer is stopped or started (DC bit) it must look
+		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
+		 * A timer interrupt should not happen in between.
+		 */
+		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
+			if (v & CAUSEF_DC) {
+				/* disable timer first */
+				kvm_mips_count_disable_cause(vcpu);
+				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+			} else {
+				/* enable timer last */
+				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+				kvm_mips_count_enable_cause(vcpu);
+			}
+		} else {
+			kvm_write_c0_guest_cause(cop0, v);
+		}
+		break;
+	case KVM_REG_MIPS_COUNT_CTL:
+		ret = kvm_mips_set_count_ctl(vcpu, v);
+		break;
+	case KVM_REG_MIPS_COUNT_RESUME:
+		ret = kvm_mips_set_count_resume(vcpu, v);
+		break;
+	case KVM_REG_MIPS_COUNT_HZ:
+		ret = kvm_mips_set_count_hz(vcpu, v);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
 static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	/* exit handlers */
 	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
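These two callbacks plug the emulated timer registers into KVM's generic ONE_REG interface, so a VMM can checkpoint and restore the virtual Count, Compare, and the timer control state. A hedged sketch of the userspace side: vcpu_fd is assumed to come from KVM_CREATE_VCPU, and since the KVM_REG_MIPS_CP0_COUNT constant may not be exported to userspace headers on this kernel, the id is built manually from the CP0 encoding (register 9, select 0 = Count):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the emulated CP0_Count of a vCPU via KVM_GET_ONE_REG.
 * Assumes vcpu_fd was obtained from KVM_CREATE_VCPU beforehand. */
static int read_guest_count(int vcpu_fd, uint32_t *count)
{
	uint32_t val = 0;
	struct kvm_one_reg reg = {
		/* CP0 register 9, select 0, as a 32-bit MIPS register id */
		.id   = KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 9 + 0),
		.addr = (uintptr_t)&val,	/* kernel writes the value here */
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		perror("KVM_GET_ONE_REG");
		return -1;
	}
	*count = val;
	return 0;
}

The set path mirrors this with KVM_SET_ONE_REG; note how set_one_reg above orders the DC update so that the timer is disabled before, or enabled after, the rest of Cause changes, keeping the transition atomic from the guest's point of view.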
@@ -423,6 +483,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
 	.irq_deliver = kvm_mips_irq_deliver_cb,
 	.irq_clear = kvm_mips_irq_clear_cb,
+	.get_one_reg = kvm_trap_emul_get_one_reg,
+	.set_one_reg = kvm_trap_emul_set_one_reg,
 };

 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
@@ -31,6 +31,7 @@ void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
 void (*flush_icache_range)(unsigned long start, unsigned long end);
 EXPORT_SYMBOL_GPL(flush_icache_range);
 void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(local_flush_icache_range);

 void (*__flush_cache_vmap)(void);
 void (*__flush_cache_vunmap)(void);
@@ -74,18 +74,8 @@ static void __init estimate_frequencies(void)
 	unsigned int giccount = 0, gicstart = 0;
 #endif

-#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
-	unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
-
-	/*
-	 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
-	 */
-	count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
-	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
-	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
-		count *= 2;
-
-	mips_hpt_frequency = count;
+#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
+	mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
+	return;
 #endif
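With the new Kconfig option, the Malta guest no longer infers its CPU clock from host frequency heuristics: the Count/Compare rate is stated directly in MHz and mips_hpt_frequency is plain arithmetic. A small standalone illustration of the computation; KVM_GUEST_TIMER_FREQ_MHZ is a stand-in for the CONFIG_KVM_GUEST_TIMER_FREQ value (default 100):

#include <stdio.h>

/* Mirrors the new computation: the guest timer frequency is given
 * directly in MHz, so no RTC-based calibration is needed. */
#define KVM_GUEST_TIMER_FREQ_MHZ 100

int main(void)
{
	unsigned long hpt_frequency = KVM_GUEST_TIMER_FREQ_MHZ * 1000000ul;
	unsigned long tick_hz = 100;	/* e.g. a 10 ms periodic tick */

	printf("mips_hpt_frequency = %lu Hz\n", hpt_frequency);
	printf("Count cycles per tick = %lu\n", hpt_frequency / tick_hz);
	return 0;
}

At the default 100 MHz this yields 1,000,000 Count cycles per 10 ms tick, matching what kvm_mips_read_count()-based emulation will report to the guest.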