Merge branch 'kvm-updates/3.1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/3.1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (143 commits)
  KVM: IOMMU: Disable device assignment without interrupt remapping
  KVM: MMU: trace mmio page fault
  KVM: MMU: mmio page fault support
  KVM: MMU: reorganize struct kvm_shadow_walk_iterator
  KVM: MMU: lockless walking shadow page table
  KVM: MMU: do not need atomicly to set/clear spte
  KVM: MMU: introduce the rules to modify shadow page table
  KVM: MMU: abstract some functions to handle fault pfn
  KVM: MMU: filter out the mmio pfn from the fault pfn
  KVM: MMU: remove bypass_guest_pf
  KVM: MMU: split kvm_mmu_free_page
  KVM: MMU: count used shadow pages on prepareing path
  KVM: MMU: rename 'pt_write' to 'emulate'
  KVM: MMU: cleanup for FNAME(fetch)
  KVM: MMU: optimize to handle dirty bit
  KVM: MMU: cache mmio info on page fault path
  KVM: x86: introduce vcpu_mmio_gva_to_gpa to cleanup the code
  KVM: MMU: do not update slot bitmap if spte is nonpresent
  KVM: MMU: fix walking shadow page table
  KVM guest: KVM Steal time registration
  ...
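The asm-offsets.c hunks that follow all use the kernel's DEFINE()/offsetof() trick: the file is only ever compiled to assembly (never assembled), and each DEFINE() plants a "->SYMBOL value" marker that Kbuild turns into a #define in asm-offsets.h, so the hand-written KVM entry/exit assembly in this merge can address struct fields by name instead of hard-coded offsets. A minimal sketch of the mechanism, with a simplified stand-in struct and illustrative symbol names that are not taken from this diff:

/* sketch.c - rough illustration of the asm-offsets technique.
 * Build with "gcc -S sketch.c"; the ->SYMBOL markers appear in sketch.s,
 * from which a build script greps/seds out "#define SYMBOL value" lines.
 * The output is not valid assembler input, which is fine: it is never
 * assembled, only inspected. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct vcpu_sketch {			/* hypothetical stand-in struct */
	unsigned long pc;
	unsigned long gpr[32];
};

int main(void)
{
	DEFINE(SK_VCPU_PC, offsetof(struct vcpu_sketch, pc));
	DEFINE(SK_VCPU_R1, offsetof(struct vcpu_sketch, gpr[1]));
	return 0;
}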
@@ -128,6 +128,7 @@ int main(void)
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +188,9 @@ int main(void)
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +201,6 @@ int main(void)
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
#endif
#endif /* CONFIG_PPC64 */

/* RTAS */
@@ -397,67 +395,160 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
#ifdef CONFIG_ALTIVEC
DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
#endif
#ifdef CONFIG_VSX
DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
#endif
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_64_HV
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));

/* book3s */
#ifdef CONFIG_KVM_BOOK3S_64_HV
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
offsetof(struct kvmppc_vcpu_book3s, vcpu));
DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
vmhandler));
DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
scratch0));
DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
scratch1));
DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
in_guest));
DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
fault_dsisr));
DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
fault_dar));
DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
last_inst));
DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
shadow_srr1));
#ifdef CONFIG_PPC_BOOK3S_32
DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
#endif
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else /* 32-bit */
# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif

SVCPU_FIELD(SVCPU_CR, cr);
SVCPU_FIELD(SVCPU_XER, xer);
SVCPU_FIELD(SVCPU_CTR, ctr);
SVCPU_FIELD(SVCPU_LR, lr);
SVCPU_FIELD(SVCPU_PC, pc);
SVCPU_FIELD(SVCPU_R0, gpr[0]);
SVCPU_FIELD(SVCPU_R1, gpr[1]);
SVCPU_FIELD(SVCPU_R2, gpr[2]);
SVCPU_FIELD(SVCPU_R3, gpr[3]);
SVCPU_FIELD(SVCPU_R4, gpr[4]);
SVCPU_FIELD(SVCPU_R5, gpr[5]);
SVCPU_FIELD(SVCPU_R6, gpr[6]);
SVCPU_FIELD(SVCPU_R7, gpr[7]);
SVCPU_FIELD(SVCPU_R8, gpr[8]);
SVCPU_FIELD(SVCPU_R9, gpr[9]);
SVCPU_FIELD(SVCPU_R10, gpr[10]);
SVCPU_FIELD(SVCPU_R11, gpr[11]);
SVCPU_FIELD(SVCPU_R12, gpr[12]);
SVCPU_FIELD(SVCPU_R13, gpr[13]);
SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
SVCPU_FIELD(SVCPU_SR, sr);
#endif
#ifdef CONFIG_PPC64
SVCPU_FIELD(SVCPU_SLB, slb);
SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
#endif

HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);

#ifdef CONFIG_KVM_BOOK3S_64_HV
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
HSTATE_FIELD(HSTATE_PMC, host_pmc);
HSTATE_FIELD(HSTATE_PURR, host_purr);
HSTATE_FIELD(HSTATE_SPURR, host_spurr);
HSTATE_FIELD(HSTATE_DSCR, host_dscr);
HSTATE_FIELD(HSTATE_DABR, dabr);
HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +558,7 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif /* CONFIG_PPC_BOOK3S */
#endif
#endif /* CONFIG_KVM */

#ifdef CONFIG_KVM_GUEST
DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +588,13 @@ int main(void)
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif

#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
#endif

#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));

@@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7)
blr

__init_hvmode_206:
/* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
/* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
mfmsr r3
rldicl. r0,r3,4,63
bnelr
ld r5,CPU_SPEC_FEATURES(r4)
LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
xor r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr
@@ -61,19 +61,23 @@ __init_LPCR:
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
* DPFD = 4
* HDICE = 0
* VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
* VRMASD = 0b10000 (L=1, LP=00)
*
* Other bits untouched for now
*/
mfspr r3,SPRN_LPCR
ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
xori r3,r3, LPCR_LPES0
li r5,1
rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
li r5,7
sldi r5,r5,LPCR_DPFD_SH
andc r3,r3,r5
li r5,4
sldi r5,r5,LPCR_DPFD_SH
or r3,r3,r5
rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
clrrdi r3,r3,1 /* clear HDICE */
li r5,4
rldimi r3,r5, LPCR_VC_SH, 0
li r5,0x10
rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
mtspr SPRN_LPCR,r3
isync
blr

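The __init_LPCR hunk above replaces the open-coded set/clear/or sequences with rldimi (rotate-left-doubleword-immediate-then-mask-insert), which performs a bit-field insert in a single instruction. In C terms each rldimi is a masked read-modify-write; a rough model follows, with shift and width passed in explicitly because the kernel's LPCR_*_SH constants are not shown in this diff (PECE is still set by a plain ori in the asm, so it is omitted here):

/* Hedged C model of the rldimi-style field updates in __init_LPCR.
 * Target values come from the comment block in the hunk:
 * LPES=0b01, DPFD=4, VC=0b100, VRMASD=0b10000, HDICE=0. */
#include <stdint.h>

static inline uint64_t insert_field(uint64_t reg, unsigned int shift,
				    unsigned int width, uint64_t val)
{
	uint64_t mask = ((1ULL << width) - 1) << shift;	/* field being replaced */
	return (reg & ~mask) | ((val << shift) & mask);	/* rldimi-like insert */
}

static uint64_t model_init_lpcr(uint64_t lpcr, unsigned int lpes_sh,
				unsigned int dpfd_sh, unsigned int vc_sh,
				unsigned int vrmasd_sh)
{
	lpcr = insert_field(lpcr, lpes_sh,   2, 1);	/* LPES   = 0b01    */
	lpcr = insert_field(lpcr, dpfd_sh,   3, 4);	/* DPFD   = 4       */
	lpcr = insert_field(lpcr, vc_sh,     3, 4);	/* VC     = 0b100   */
	lpcr = insert_field(lpcr, vrmasd_sh, 5, 0x10);	/* VRMASD = 0b10000 */
	return lpcr & ~1ULL;				/* HDICE  = 0 (clrrdi) */
}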
@@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
beqlr
beq no_hv_mode

mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
@@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
beqlr
beq no_hv_mode

mfspr r0,SPRN_HID0
li r11,0x15 /* clear DOZE and SLEEP */
@@ -109,6 +109,14 @@ load_hids:
sync
isync

/* Try to set LPES = 01 in HID4 */
mfspr r0,SPRN_HID4
clrldi r0,r0,1 /* clear LPES0 */
ori r0,r0,HID4_LPES1 /* set LPES1 */
sync
mtspr SPRN_HID4,r0
isync

/* Save away cpu state */
LOAD_REG_ADDR(r5,cpu_state_storage)

@@ -117,11 +125,21 @@ load_hids:
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
std r3,CS_HID1(r5)
mfspr r3,SPRN_HID4
std r3,CS_HID4(r5)
mfspr r4,SPRN_HID4
std r4,CS_HID4(r5)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)

/* See if we successfully set LPES1 to 1; if not we are in Apple mode */
andi. r4,r4,HID4_LPES1
bnelr

no_hv_mode:
/* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */
ld r5,CPU_SPEC_FEATURES(r4)
LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
andc r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr

/* Called with no MMU context (typically MSR:IR/DR off) to

@@ -40,7 +40,6 @@ __start_interrupts:
.globl system_reset_pSeries;
system_reset_pSeries:
HMT_MEDIUM;
DO_KVM 0x100;
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -50,82 +49,73 @@ BEGIN_FTR_SECTION
* state loss at this time.
*/
mfspr r13,SPRN_SRR1
rlwinm r13,r13,47-31,30,31
cmpwi cr0,r13,1
bne 1f
b .power7_wakeup_noloss
1: cmpwi cr0,r13,2
bne 1f
b .power7_wakeup_loss
rlwinm. r13,r13,47-31,30,31
beq 9f

/* waking up from powersave (nap) state */
cmpwi cr1,r13,2
/* Total loss of HV state is fatal, we could try to use the
* PIR to locate a PACA, then use an emergency stack etc...
* but for now, let's just stay stuck here
*/
1: cmpwi cr0,r13,3
beq .
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
bgt cr1,.
GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
lbz r0,PACAPROCSTART(r13)
cmpwi r0,0x80
bne 1f
li r0,0
stb r0,PACAPROCSTART(r13)
b kvm_start_guest
1:
#endif

beq cr1,2f
b .power7_wakeup_noloss
2: b .power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)

. = 0x200
_machine_check_pSeries:
HMT_MEDIUM
DO_KVM 0x200
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
machine_check_pSeries_1:
/* This is moved out of line as it can be patched by FW, but
* some code path might still want to branch into the original
* vector
*/
b machine_check_pSeries

. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
DO_KVM 0x300
SET_SCRATCH0(r13)
#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
GET_PACA(r13)
std r9,PACA_EXSLB+EX_R9(r13)
std r10,PACA_EXSLB+EX_R10(r13)
mfspr r10,SPRN_DAR
mfspr r9,SPRN_DSISR
srdi r10,r10,60
rlwimi r10,r9,16,0x20
mfcr r9
cmpwi r10,0x2c
beq do_stab_bolted_pSeries
ld r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
ld r11,PACA_EXSLB+EX_R9(r13)
std r12,PACA_EXGEN+EX_R12(r13)
GET_SCRATCH0(r12)
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R13(r13)
EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
FTR_SECTION_ELSE
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
b data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
KVMTEST_PR, 0x300)

. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
DO_KVM 0x380
SET_SCRATCH0(r13)
GET_PACA(r13)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -147,24 +137,16 @@ data_access_slb_pSeries:
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
DO_KVM 0x480
SET_SCRATCH0(r13)
GET_PACA(r13)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -184,26 +166,46 @@ instruction_access_slb_pSeries:
hardware_interrupt_pSeries:
hardware_interrupt_hv:
BEGIN_FTR_SECTION
_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
EXC_HV, SOFTEN_TEST_HV)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
FTR_SECTION_ELSE
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
EXC_STD, SOFTEN_TEST_HV_201)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

. = 0xc00
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
DO_KVM 0xc00
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
SET_SCRATCH0(r13)
GET_PACA(r13)
std r9,PACA_EXGEN+EX_R9(r13)
std r10,PACA_EXGEN+EX_R10(r13)
mfcr r9
KVMTEST(0xc00)
GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
@@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
rfid
b . /* prevent speculative execution */

KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1: mfspr r12,SPRN_SRR1
xori r12,r12,MSR_LE
@@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
b .

STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
* out of line to handle them
@@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1:

#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

. = 0x3000

/*** Out of line interrupts support ***/

/* moved from 0x200 */
machine_check_pSeries:
.globl machine_check_fwnmi
machine_check_fwnmi:
HMT_MEDIUM
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
EXC_STD, KVMTEST, 0x200)
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

#ifndef CONFIG_POWER4_ONLY
/* moved from 0x300 */
data_access_check_stab:
GET_PACA(r13)
std r9,PACA_EXSLB+EX_R9(r13)
std r10,PACA_EXSLB+EX_R10(r13)
mfspr r10,SPRN_DAR
mfspr r9,SPRN_DSISR
srdi r10,r10,60
rlwimi r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
lbz r9,HSTATE_IN_GUEST(r13)
rlwimi r10,r9,8,0x300
#endif
mfcr r9
cmpwi r10,0x2c
beq do_stab_bolted_pSeries
mtcrf 0x80,r9
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
b data_access_not_stab
do_stab_bolted_pSeries:
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */

KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

.align 7
/* moved from 0xe00 */
STD_EXCEPTION_HV(., 0xe00, h_data_storage)
STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
STD_EXCEPTION_HV(., 0xe40, emulation_assist)
STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
STD_EXCEPTION_HV(., 0xe02, h_data_storage)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
STD_EXCEPTION_HV(., 0xe42, emulation_assist)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -317,14 +385,6 @@ masked_Hinterrupt:
hrfid
b .

.align 7
do_stab_bolted_pSeries:
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

#ifdef CONFIG_PPC_PSERIES
/*
* Vectors for the FWNMI option. Share common code.
@@ -334,14 +394,8 @@ do_stab_bolted_pSeries:
system_reset_fwnmi:
HMT_MEDIUM
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)

.globl machine_check_fwnmi
.align 7
machine_check_fwnmi:
HMT_MEDIUM
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

@@ -376,7 +430,11 @@ slb_miss_user_pseries:
/* KVM's trampoline code needs to be close to the interrupt handlers */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
#else
#include "../kvm/book3s_hv_rmhandlers.S"
#endif
#endif

.align 7

@@ -656,7 +656,7 @@ load_up_spe:
cmpi 0,r4,0
beq 1f
addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
SAVE_32EVRS(0,r10,r4)
SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
evxor evr10, evr10, evr10 /* clear out evr10 */
evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
li r5,THREAD_ACC
@@ -676,7 +676,7 @@ load_up_spe:
stw r4,THREAD_USED_SPE(r5)
evlddx evr4,r10,r5
evmra evr4,evr4
REST_32EVRS(0,r10,r5)
REST_32EVRS(0,r10,r5,THREAD_EVR0)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
@@ -787,13 +787,11 @@ _GLOBAL(giveup_spe)
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
SAVE_32EVRS(0, r4, r3)
SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
evxor evr6, evr6, evr6 /* clear out evr6 */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
mfspr r6,SPRN_SPEFSCR
stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h

@@ -73,7 +73,6 @@ _GLOBAL(power7_idle)
b .

_GLOBAL(power7_wakeup_loss)
GET_PACA(r13)
ld r1,PACAR1(r13)
REST_NVGPRS(r1)
REST_GPR(2, r1)
@@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss)
rfid

_GLOBAL(power7_wakeup_noloss)
GET_PACA(r13)
ld r1,PACAR1(r13)
ld r4,_MSR(r1)
ld r5,_NIP(r1)

@@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca)
* if we do a GET_PACA() before the feature fixups have been
* applied
*/
if (cpu_has_feature(CPU_FTR_HVMODE_206))
if (cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);

@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
}
preempt_enable();

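The process.c hunks above add EXPORT_SYMBOL_GPL to the flush_*_to_thread helpers (and teach the SPE path to save SPEFSCR), presumably so that modular KVM code can force a task's live FP/Altivec/VSX/SPE register state back into its thread_struct before borrowing the hardware registers. A sketch of how a GPL module would use one of them; the declaring header varies by kernel version, so an explicit extern is used here, and the module name is hypothetical:

/* Illustrative only: real KVM code does this before saving/loading
 * guest floating-point state. */
#include <linux/module.h>
#include <linux/sched.h>

/* Declared in an arch header on powerpc; extern'd here for the sketch. */
extern void flush_fp_to_thread(struct task_struct *tsk);

static int __init fp_borrow_init(void)
{
	/* Push current's live FP registers into current->thread so this
	 * module can use the FPU without corrupting the task's state. */
	flush_fp_to_thread(current);
	return 0;
}

static void __exit fp_borrow_exit(void) { }

module_init(fp_borrow_init);
module_exit(fp_borrow_exit);
MODULE_LICENSE("GPL");	/* required: the symbol is EXPORT_SYMBOL_GPL */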
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)

int threads_per_core, threads_shift;
cpumask_t threads_core_mask;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);

static void __init cpu_init_thread_core_maps(int tpc)
{

@@ -63,6 +63,7 @@
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>

#include "setup.h"

@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();

kvm_rma_init();

ppc64_boot_msg(0x15, "Setup Done");
}

@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
if (likely(smp_ops))
smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{

@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
int code = 0;
int err;

preempt_disable();
if (regs->msr & MSR_SPE)
giveup_spe(current);
preempt_enable();
flush_spe_to_thread(current);

spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;