MIPS: Retrieve ASID masks using function accepting struct cpuinfo_mips

In preparation for supporting variable ASID masks, retrieve ASID masks
using functions in asm/cpu-info.h which accept struct cpuinfo_mips. This
will allow those functions to determine the ASID mask based upon the CPU
in a later patch. This also allows for the r3k & r8k cases to be handled
in Kconfig, which is arguably cleaner than the previous #ifdefs.
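
For reference, the accessors this series relies on are static inlines along
the following lines. This is a reconstruction under assumptions, not the
patch's verbatim text: CONFIG_MIPS_ASID_BITS and CONFIG_MIPS_ASID_SHIFT are
assumed names for the Kconfig-provided constants mentioned above, and only
the function names and signatures are confirmed by the diff below.

    /* Sketch only: cpu_asid_mask() takes the CPU's cpuinfo so that a
     * later patch can derive the mask from the individual CPU; at this
     * point it still computes a fixed value from Kconfig. */
    static inline unsigned long cpu_asid_inc(void)
    {
            return 1 << CONFIG_MIPS_ASID_SHIFT;
    }

    static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
    {
            return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
    }

    /* The bits above the ASID field act as a generation ("version")
     * counter for the ASID space. */
    static inline u64 asid_version_mask(unsigned int cpu)
    {
            unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

            return ~(u64)(asid_mask | (asid_mask - 1));
    }

    static inline u64 asid_first_version(unsigned int cpu)
    {
            return ~asid_version_mask(cpu) + 1;
    }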

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13210/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Author:    Paul Burton
Date:      2016-05-06 14:36:23 +01:00
Committer: Ralf Baechle
Commit:    4edf00a46b (parent f1b711c638)

10 changed files with 92 additions and 61 deletions

@@ -49,12 +49,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+        int cpu = smp_processor_id();
+
+        return vcpu->arch.guest_kernel_asid[cpu] &
+                        cpu_asid_mask(&cpu_data[cpu]);
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+        int cpu = smp_processor_id();
+
+        return vcpu->arch.guest_user_asid[cpu] &
+                        cpu_asid_mask(&cpu_data[cpu]);
 }
 
 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@ void kvm_mips_dump_host_tlbs(void)
         old_pagemask = read_c0_pagemask();
 
         kvm_info("HOST TLBs:\n");
-        kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+        kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+                 cpu_asid_mask(&current_cpu_data));
 
         for (i = 0; i < current_cpu_data.tlbsize; i++) {
                 write_c0_index(i);
@@ -564,15 +571,15 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
         unsigned long asid = asid_cache(cpu);
 
-        asid += ASID_INC;
-        if (!(asid & ASID_MASK)) {
+        asid += cpu_asid_inc();
+        if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
                 if (cpu_has_vtag_icache)
                         flush_icache_all();
 
                 kvm_local_flush_tlb_all();      /* start new asid cycle */
 
                 if (!asid)      /* fix version if needed */
-                        asid = ASID_FIRST_VERSION;
+                        asid = asid_first_version(cpu);
         }
 
         cpu_context(cpu, mm) = asid_cache(cpu) = asid;
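
The rollover logic above follows the usual MIPS ASID generation scheme: the
per-CPU asid_cache counts in units of cpu_asid_inc(), and once the ASID
field wraps to zero the TLB is flushed and the high bits advance to a new
generation. A toy user-space illustration with assumed values (an 8-bit
ASID at shift 0; not the kernel's types):

    #include <assert.h>

    int main(void)
    {
            const unsigned long asid_mask = 0xff; /* assumed 8-bit ASID */
            unsigned long asid = 0x1ff;           /* version 1, last ASID */

            asid += 1;                    /* analogue of cpu_asid_inc() */
            assert(!(asid & asid_mask));  /* ASID field wrapped to zero... */
            assert(asid == 0x200);        /* ...bumping to version 2 */
            return 0;
    }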
@@ -627,6 +634,7 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+        unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
         unsigned long flags;
         int newasid = 0;
@@ -637,7 +645,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         local_irq_save(flags);
 
         if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-                                                ASID_VERSION_MASK) {
+                                                asid_version_mask(cpu)) {
                 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                 vcpu->arch.guest_kernel_asid[cpu] =
                         vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +680,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                  */
                 if (current->flags & PF_VCPU) {
                         write_c0_entryhi(vcpu->arch.
-                                         preempt_entryhi & ASID_MASK);
+                                         preempt_entryhi & asid_mask);
                         ehb();
                 }
         } else {
@@ -687,11 +695,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 if (KVM_GUEST_KERNEL_MODE(vcpu))
                         write_c0_entryhi(vcpu->arch.
                                          guest_kernel_asid[cpu] &
-                                         ASID_MASK);
+                                         asid_mask);
                 else
                         write_c0_entryhi(vcpu->arch.
                                          guest_user_asid[cpu] &
-                                         ASID_MASK);
+                                         asid_mask);
                 ehb();
         }
 }
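
Note that the write_c0_entryhi() paths above all use the asid_mask local
computed once on entry to kvm_arch_vcpu_load(), rather than re-evaluating
cpu_asid_mask(&cpu_data[cpu]) at each use, keeping this hot path cheap once
the mask becomes a genuinely per-CPU value in the later patch.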
@@ -721,7 +729,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
         kvm_mips_callbacks->vcpu_get_regs(vcpu);
 
         if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-             ASID_VERSION_MASK)) {
+             asid_version_mask(cpu))) {
                 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                           cpu_context(cpu, current->mm));
                 drop_mmu_context(current->mm, cpu);
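
The XOR-and-mask idiom above (also used in kvm_arch_vcpu_load()) detects a
stale context: if any version bits differ between the cached context and
the per-CPU asid_cache, the ASID belongs to an old generation and the MMU
context must be dropped. A toy illustration with the same assumed 8-bit
layout as earlier:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long version_mask = ~0xfful; /* asid_version_mask() analogue */
            unsigned long asid_cache = 0x305;  /* version 3, ASID 0x05 */
            unsigned long ctx = 0x2a7;         /* version 2: one generation old */

            if ((ctx ^ asid_cache) & version_mask)
                    printf("stale context: drop and regenerate\n");
            return 0;
    }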