KVM: PPC: Use accessor functions for GPR access

All code in PPC KVM currently accesses GPRs in the vcpu struct directly.

While there's nothing wrong with that given the way GPRs are currently
stored and loaded, it doesn't suffice for the PACA acceleration that will
follow in this patchset.

So let's create small inline wrapper functions that are called whenever
a GPR needs to be read from or written to. The compiled code shouldn't
change at all for now.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Author:       Alexander Graf
Date:         2010-01-08 02:58:01 +01:00
Committed by: Marcelo Tosatti
Parent:       0d178975d0
Commit:       8e5b26b55a
11 changed files with 274 additions and 225 deletions
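
The wrappers themselves are one-line pass-throughs, which is why the generated
code is expected to stay the same. As a rough sketch (assuming they are added as
static inlines to arch/powerpc/include/asm/kvm_ppc.h, a file not shown in the
excerpt below), they would look something like this:

/*
 * Minimal sketch of the GPR accessors this patch introduces; for now they
 * simply forward to the gpr array in the vcpu struct.
 */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        return vcpu->arch.gpr[num];
}

With accessors like these in place, call sites such as the e500 SPR emulation
in the diff below convert mechanically: reads of vcpu->arch.gpr[rs] become
kvmppc_get_gpr(vcpu, rs), and writes to vcpu->arch.gpr[rt] become
kvmppc_set_gpr(vcpu, rt, ...).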


@@ -74,54 +74,55 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
         int emulated = EMULATE_DONE;
+        ulong spr_val = kvmppc_get_gpr(vcpu, rs);
         switch (sprn) {
         case SPRN_PID:
                 vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
-                        vcpu->arch.pid = vcpu->arch.gpr[rs];
+                        vcpu->arch.pid = spr_val;
                 break;
         case SPRN_PID1:
-                vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->pid[1] = spr_val; break;
         case SPRN_PID2:
-                vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->pid[2] = spr_val; break;
         case SPRN_MAS0:
-                vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas0 = spr_val; break;
         case SPRN_MAS1:
-                vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas1 = spr_val; break;
         case SPRN_MAS2:
-                vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas2 = spr_val; break;
         case SPRN_MAS3:
-                vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas3 = spr_val; break;
         case SPRN_MAS4:
-                vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas4 = spr_val; break;
         case SPRN_MAS6:
-                vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas6 = spr_val; break;
         case SPRN_MAS7:
-                vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->mas7 = spr_val; break;
         case SPRN_L1CSR1:
-                vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->l1csr1 = spr_val; break;
         case SPRN_HID0:
-                vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->hid0 = spr_val; break;
         case SPRN_HID1:
-                vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break;
+                vcpu_e500->hid1 = spr_val; break;
         case SPRN_MMUCSR0:
                 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
-                                vcpu->arch.gpr[rs]);
+                                spr_val);
                 break;
         /* extra exceptions */
         case SPRN_IVOR32:
-                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs];
+                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
                 break;
         case SPRN_IVOR33:
-                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs];
+                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
                 break;
         case SPRN_IVOR34:
-                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs];
+                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
                 break;
         case SPRN_IVOR35:
-                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs];
+                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
                 break;
         default:
@@ -138,63 +139,71 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
         switch (sprn) {
         case SPRN_PID:
-                vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
         case SPRN_PID1:
-                vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
         case SPRN_PID2:
-                vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
         case SPRN_MAS0:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas0; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
         case SPRN_MAS1:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas1; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
         case SPRN_MAS2:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas2; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
         case SPRN_MAS3:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas3; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
         case SPRN_MAS4:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas4; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
         case SPRN_MAS6:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas6; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
         case SPRN_MAS7:
-                vcpu->arch.gpr[rt] = vcpu_e500->mas7; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
         case SPRN_TLB0CFG:
-                vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG);
-                vcpu->arch.gpr[rt] &= ~0xfffUL;
-                vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
+        {
+                ulong tmp = SPRN_TLB0CFG;
+                tmp &= ~0xfffUL;
+                tmp |= vcpu_e500->guest_tlb_size[0];
+                kvmppc_set_gpr(vcpu, rt, tmp);
                 break;
+        }
         case SPRN_TLB1CFG:
-                vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG);
-                vcpu->arch.gpr[rt] &= ~0xfffUL;
-                vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1];
+        {
+                ulong tmp = SPRN_TLB1CFG;
+                tmp &= ~0xfffUL;
+                tmp |= vcpu_e500->guest_tlb_size[1];
+                kvmppc_set_gpr(vcpu, rt, tmp);
                 break;
+        }
         case SPRN_L1CSR1:
-                vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
         case SPRN_HID0:
-                vcpu->arch.gpr[rt] = vcpu_e500->hid0; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
         case SPRN_HID1:
-                vcpu->arch.gpr[rt] = vcpu_e500->hid1; break;
+                kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
         case SPRN_MMUCSR0:
-                vcpu->arch.gpr[rt] = 0; break;
+                kvmppc_set_gpr(vcpu, rt, 0); break;
         case SPRN_MMUCFG:
-                vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break;
+                kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
         /* extra exceptions */
         case SPRN_IVOR32:
-                vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
+                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
                 break;
         case SPRN_IVOR33:
-                vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
                 break;
         case SPRN_IVOR34:
-                vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
+                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
                 break;
         case SPRN_IVOR35:
-                vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+                kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
                 break;
         default:
                 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);