- #include <asm/asm-offsets.h>
- #include <asm/bug.h>
- #ifdef CONFIG_PPC_BOOK3S
- #include <asm/exception-64s.h>
- #else
- #include <asm/exception-64e.h>
- #endif
- #include <asm/feature-fixups.h>
- #include <asm/head-64.h>
- #include <asm/hw_irq.h>
- #include <asm/kup.h>
- #include <asm/mmu.h>
- #include <asm/ppc_asm.h>
- #include <asm/ptrace.h>
- .align 7
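- /*
- * DEBUG_SRR_VALID (CONFIG_PPC_RFI_SRR_DEBUG only): check that SRR0/SRR1
- * (or HSRR0/HSRR1 when \srr is not "srr") still match the _NIP/_MSR
- * saved in the interrupt frame, and emit a once-only warning if they
- * differ. The low 2 bits are cleared before the NIP compare.
- */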
- .macro DEBUG_SRR_VALID srr
- #ifdef CONFIG_PPC_RFI_SRR_DEBUG
- .ifc \srr,srr
- mfspr r11,SPRN_SRR0
- ld r12,_NIP(r1)
- clrrdi r11,r11,2
- clrrdi r12,r12,2
- 100: tdne r11,r12
- EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
- mfspr r11,SPRN_SRR1
- ld r12,_MSR(r1)
- 100: tdne r11,r12
- EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
- .else
- mfspr r11,SPRN_HSRR0
- ld r12,_NIP(r1)
- clrrdi r11,r11,2
- clrrdi r12,r12,2
- 100: tdne r11,r12
- EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
- mfspr r11,SPRN_HSRR1
- ld r12,_MSR(r1)
- 100: tdne r11,r12
- EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
- .endif
- #endif
- .endm
- #ifdef CONFIG_PPC_BOOK3S
- .macro system_call_vectored name trapnr
- .globl system_call_vectored_\name
- system_call_vectored_\name:
- _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
- SCV_INTERRUPT_TO_KERNEL
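- /*
- * The scv vector has stashed the user r13 in r9, loaded the PACA into
- * r13, and captured the return NIP and MSR (delivered by scv in LR and
- * CTR) in r11 and r12; r1 still points at the user stack.
- */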
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
- LOAD_PACA_TOC()
- mfcr r12
- li r11,0
- /* Save syscall parameters in r3-r8 */
- SAVE_GPRS(3, 8, r1)
- /* Zero r9-r12; this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_LINK(r1)
- std r11,_CTR(r1)
- li r11,\trapnr
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- std r3,ORIG_GPR3(r1)
- /* Calling convention has r3 = regs, r4 = orig r0 */
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r0
- LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
- std r11,-16(r3) /* "regshere" marker */
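- /* the marker lets stack unwinders recognise this frame as holding regs */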
- BEGIN_FTR_SECTION
- HMT_MEDIUM
- END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- /*
- * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
- * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
- * and interrupts may be masked and pending already.
- * system_call_exception() will call trace_hardirqs_off(), so interrupts
- * may already have been blocked by the time the tracer is told; this is
- * the best we can do.
- */
- bl system_call_exception
- .Lsyscall_vectored_\name\()_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,1 /* scv */
- bl syscall_exit_prepare
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- .Lsyscall_vectored_\name\()_rst_start:
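- /*
- * If anything other than PACA_IRQ_HARD_DIS became pending while we were
- * soft-masked, redo the exit via the restart path so it can be handled.
- */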
- lbz r11,PACAIRQHAPPENED(r13)
- andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
- bne- syscall_vectored_\name\()_restart
- li r11,IRQS_ENABLED
- stb r11,PACAIRQSOFTMASK(r13)
- li r11,0
- stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
- BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
- END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
- END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- cmpdi r3,0
- bne .Lsyscall_vectored_\name\()_restore_regs
- /* rfscv returns with LR->NIA and CTR->MSR */
- mtlr r4
- mtctr r5
- /*
- * Could zero these as per ABI, but we may consider a stricter ABI
- * which preserves these if libc implementations can benefit, so
- * restore them for now until further measurement is done.
- */
- REST_GPR(0, r1)
- REST_GPRS(4, 8, r1)
- /* Zero volatile regs that may contain sensitive kernel data */
- ZEROIZE_GPRS(9, 12)
- mtspr SPRN_XER,r0
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- REST_GPRS(2, 3, r1)
- REST_GPR(13, r1)
- REST_GPR(1, r1)
- RFSCV_TO_USER
- b . /* prevent speculative execution */
- .Lsyscall_vectored_\name\()_restore_regs:
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
- ld r3,_CTR(r1)
- ld r4,_LINK(r1)
- ld r5,_XER(r1)
- REST_NVGPRS(r1)
- REST_GPR(0, r1)
- mtcr r2
- mtctr r3
- mtlr r4
- mtspr SPRN_XER,r5
- REST_GPRS(2, 13, r1)
- REST_GPR(1, r1)
- RFI_TO_USER
- .Lsyscall_vectored_\name\()_rst_end:
- syscall_vectored_\name\()_restart:
- _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
- GET_PACA(r13)
- ld r1,PACA_EXIT_SAVE_R1(r13)
- LOAD_PACA_TOC()
- ld r3,RESULT(r1)
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r11,IRQS_ALL_DISABLED
- stb r11,PACAIRQSOFTMASK(r13)
- bl syscall_exit_restart
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- b .Lsyscall_vectored_\name\()_rst_start
- 1:
- SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
- RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
- .endm
- system_call_vectored common 0x3000
- /*
- * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0,
- * which is tested by system_call_exception when r0 is -1 (as set by vector
- * entry code).
- */
- system_call_vectored sigill 0x7ff0
- #endif /* CONFIG_PPC_BOOK3S */
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common_real
- system_call_common_real:
- _ASM_NOKPROBE_SYMBOL(system_call_common_real)
- ld r10,PACAKMSR(r13) /* get MSR value for kernel */
- mtmsrd r10
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common
- system_call_common:
- _ASM_NOKPROBE_SYMBOL(system_call_common)
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
- #ifdef CONFIG_PPC_E500
- START_BTB_FLUSH_SECTION
- BTB_FLUSH(r10)
- END_BTB_FLUSH_SECTION
- #endif
- LOAD_PACA_TOC()
- mfcr r12
- li r11,0
- /* Save syscall parameters in r3-r8 */
- SAVE_GPRS(3, 8, r1)
- /* Zero r9-r12; this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_CTR(r1)
- mflr r10
- /*
- * This clears CR0.SO (bit 28), which is the error indication on
- * return from this system call.
- */
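- /*
- * rldimi r12,r11,28,(63-28) rotates r11 (zero) left by 28 and inserts
- * it into r12 under a single-bit mask at bit 28, i.e. r12 &= ~0x10000000.
- * mfcr placed CR in the low 32 bits of r12 with CR0 in bits 31:28, and
- * CR0.SO is bit 28.
- */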
- rldimi r12,r11,28,(63-28)
- li r11,0xc00
- std r10,_LINK(r1)
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- std r3,ORIG_GPR3(r1)
- /* Calling convention has r3 = regs, r4 = orig r0 */
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r0
- LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
- std r11,-16(r3) /* "regshere" marker */
- #ifdef CONFIG_PPC_BOOK3S
- li r11,1
- stb r11,PACASRR_VALID(r13)
- #endif
- /*
- * We always enter kernel from userspace with irq soft-mask enabled and
- * nothing pending. system_call_exception() will call
- * trace_hardirqs_off().
- */
- li r11,IRQS_ALL_DISABLED
- stb r11,PACAIRQSOFTMASK(r13)
- #ifdef CONFIG_PPC_BOOK3S
- li r12,-1 /* Set MSR_EE and MSR_RI */
- mtmsrd r12,1
- #else
- wrteei 1
- #endif
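- /*
- * Interrupts are now hard-enabled but still soft-masked, so anything
- * that fires before the soft mask is lifted is recorded in
- * PACAIRQHAPPENED and replayed later.
- */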
- bl system_call_exception
- .Lsyscall_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,0 /* !scv */
- bl syscall_exit_prepare
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- #ifdef CONFIG_PPC_BOOK3S
- .Lsyscall_rst_start:
- lbz r11,PACAIRQHAPPENED(r13)
- andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
- bne- syscall_restart
- #endif
- li r11,IRQS_ENABLED
- stb r11,PACAIRQSOFTMASK(r13)
- li r11,0
- stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
- ld r2,_CCR(r1)
- ld r6,_LINK(r1)
- mtlr r6
- #ifdef CONFIG_PPC_BOOK3S
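- /* Skip reloading SRR0/1 when PACASRR_VALID says they already match the frame */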
- lbz r4,PACASRR_VALID(r13)
- cmpdi r4,0
- bne 1f
- li r4,0
- stb r4,PACASRR_VALID(r13)
- #endif
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
- 1:
- DEBUG_SRR_VALID srr
- BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
- END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- cmpdi r3,0
- bne .Lsyscall_restore_regs
- /* Zero volatile regs that may contain sensitive kernel data */
- ZEROIZE_GPR(0)
- ZEROIZE_GPRS(4, 12)
- mtctr r0
- mtspr SPRN_XER,r0
- .Lsyscall_restore_regs_cont:
- BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
- END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- REST_GPRS(2, 3, r1)
- REST_GPR(13, r1)
- REST_GPR(1, r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
- .Lsyscall_restore_regs:
- ld r3,_CTR(r1)
- ld r4,_XER(r1)
- REST_NVGPRS(r1)
- mtctr r3
- mtspr SPRN_XER,r4
- REST_GPR(0, r1)
- REST_GPRS(4, 12, r1)
- b .Lsyscall_restore_regs_cont
- .Lsyscall_rst_end:
- #ifdef CONFIG_PPC_BOOK3S
- syscall_restart:
- _ASM_NOKPROBE_SYMBOL(syscall_restart)
- GET_PACA(r13)
- ld r1,PACA_EXIT_SAVE_R1(r13)
- LOAD_PACA_TOC()
- ld r3,RESULT(r1)
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r11,IRQS_ALL_DISABLED
- stb r11,PACAIRQSOFTMASK(r13)
- bl syscall_exit_restart
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- b .Lsyscall_rst_start
- 1:
- SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
- RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
- #endif
- /*
- * If MSR EE/RI was never enabled, IRQs were not reconciled, NVGPRs were
- * not touched, and no exit work was created, then this can be used.
- */
- .balign IFETCH_ALIGN_BYTES
- .globl fast_interrupt_return_srr
- fast_interrupt_return_srr:
- _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
- kuap_check_amr r3, r4
- ld r5,_MSR(r1)
- andi. r0,r5,MSR_PR
- #ifdef CONFIG_PPC_BOOK3S
- beq 1f
- kuap_user_restore r3, r4
- b .Lfast_user_interrupt_return_srr
- 1: kuap_kernel_restore r3, r4
- andi. r0,r5,MSR_RI
- li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
- bne+ .Lfast_kernel_interrupt_return_srr
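- /* MSR[RI] was clear: SRR0/SRR1 may have been clobbered, cannot return */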
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b . /* should not get here */
- #else
- bne .Lfast_user_interrupt_return_srr
- b .Lfast_kernel_interrupt_return_srr
- #endif
- .macro interrupt_return_macro srr
- .balign IFETCH_ALIGN_BYTES
- .globl interrupt_return_\srr
- interrupt_return_\srr\():
- _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
- ld r4,_MSR(r1)
- andi. r0,r4,MSR_PR
- beq interrupt_return_\srr\()_kernel
- interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
- _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_user_prepare
- cmpdi r3,0
- bne- .Lrestore_nvgprs_\srr
- .Lrestore_nvgprs_\srr\()_cont:
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- #ifdef CONFIG_PPC_BOOK3S
- .Linterrupt_return_\srr\()_user_rst_start:
- lbz r11,PACAIRQHAPPENED(r13)
- andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
- bne- interrupt_return_\srr\()_user_restart
- #endif
- li r11,IRQS_ENABLED
- stb r11,PACAIRQSOFTMASK(r13)
- li r11,0
- stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
- .Lfast_user_interrupt_return_\srr\():
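- /*
- * PACA(H)SRR_VALID caches whether (H)SRR0/1 already hold the frame's
- * NIP/MSR; if so, the mtsprs below are skipped. The flag ends up
- * cleared either way, since the return consumes the values.
- */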
- #ifdef CONFIG_PPC_BOOK3S
- .ifc \srr,srr
- lbz r4,PACASRR_VALID(r13)
- .else
- lbz r4,PACAHSRR_VALID(r13)
- .endif
- cmpdi r4,0
- li r4,0
- bne 1f
- #endif
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
- .ifc \srr,srr
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- 1:
- #ifdef CONFIG_PPC_BOOK3S
- stb r4,PACASRR_VALID(r13)
- #endif
- .else
- mtspr SPRN_HSRR0,r11
- mtspr SPRN_HSRR1,r12
- 1:
- #ifdef CONFIG_PPC_BOOK3S
- stb r4,PACAHSRR_VALID(r13)
- #endif
- .endif
- DEBUG_SRR_VALID \srr
- #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- lbz r4,PACAIRQSOFTMASK(r13)
- tdnei r4,IRQS_ENABLED
- #endif
- BEGIN_FTR_SECTION
- ld r10,_PPR(r1)
- mtspr SPRN_PPR,r10
- END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
- FTR_SECTION_ELSE
- ldarx r0,0,r1
- ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- li r0,0
- REST_GPRS(7, 13, r1)
- mtcr r3
- mtlr r4
- mtctr r5
- mtspr SPRN_XER,r6
- REST_GPRS(2, 6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- .ifc \srr,srr
- RFI_TO_USER
- .else
- HRFI_TO_USER
- .endif
- b . /* prevent speculative execution */
- .Linterrupt_return_\srr\()_user_rst_end:
- .Lrestore_nvgprs_\srr\():
- REST_NVGPRS(r1)
- b .Lrestore_nvgprs_\srr\()_cont
- #ifdef CONFIG_PPC_BOOK3S
- interrupt_return_\srr\()_user_restart:
- _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
- GET_PACA(r13)
- ld r1,PACA_EXIT_SAVE_R1(r13)
- LOAD_PACA_TOC()
- addi r3,r1,STACK_FRAME_OVERHEAD
- li r11,IRQS_ALL_DISABLED
- stb r11,PACAIRQSOFTMASK(r13)
- bl interrupt_exit_user_restart
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- b .Linterrupt_return_\srr\()_user_rst_start
- 1:
- SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
- RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
- #endif
- .balign IFETCH_ALIGN_BYTES
- interrupt_return_\srr\()_kernel:
- _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_kernel_prepare
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- .Linterrupt_return_\srr\()_kernel_rst_start:
- ld r11,SOFTE(r1)
- cmpwi r11,IRQS_ENABLED
- stb r11,PACAIRQSOFTMASK(r13)
- beq .Linterrupt_return_\srr\()_soft_enabled
- /*
- * Returning to soft-disabled context.
- * Check if a MUST_HARD_MASK interrupt has become pending, in which
- * case we need to disable MSR[EE] in the return context.
- *
- * The MSR[EE] check catches, among other things, the short incoherency
- * in hard_irq_disable() between clearing MSR[EE] and setting
- * PACA_IRQ_HARD_DIS.
- */
- ld r12,_MSR(r1)
- andi. r10,r12,MSR_EE
- beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled
- lbz r11,PACAIRQHAPPENED(r13)
- andi. r10,r11,PACA_IRQ_MUST_HARD_MASK
- bne 1f // HARD_MASK is pending
- // No HARD_MASK pending, clear possible HARD_DIS set by interrupt
- andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
- stb r11,PACAIRQHAPPENED(r13)
- b .Lfast_kernel_interrupt_return_\srr\()
- 1: /* Must clear MSR_EE from _MSR */
- #ifdef CONFIG_PPC_BOOK3S
- li r10,0
- /* Clear valid before changing _MSR */
- .ifc \srr,srr
- stb r10,PACASRR_VALID(r13)
- .else
- stb r10,PACAHSRR_VALID(r13)
- .endif
- #endif
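- /* xori suffices: the MSR_EE test above guarantees EE is currently set */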
- xori r12,r12,MSR_EE
- std r12,_MSR(r1)
- b .Lfast_kernel_interrupt_return_\srr\()
- .Linterrupt_return_\srr\()_soft_enabled:
- /*
- * In the soft-enabled case, we need to double-check that we have no
- * pending interrupts that might have come in before we reached the
- * restart section of code, and restart the exit so those can be
- * handled.
- *
- * If there are none, it is possible that the interrupt still
- * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
- * interrupted context. This clear will not clobber a new pending
- * interrupt coming in, because we're in the restart section, so
- * any such interrupt would return to the restart location.
- */
- #ifdef CONFIG_PPC_BOOK3S
- lbz r11,PACAIRQHAPPENED(r13)
- andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
- bne- interrupt_return_\srr\()_kernel_restart
- #endif
- li r11,0
- stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
- .Lfast_kernel_interrupt_return_\srr\():
- cmpdi cr1,r3,0
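- /* cr1 = (r3 != 0): nonzero means emulate the stack store at 1: below */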
- #ifdef CONFIG_PPC_BOOK3S
- .ifc \srr,srr
- lbz r4,PACASRR_VALID(r13)
- .else
- lbz r4,PACAHSRR_VALID(r13)
- .endif
- cmpdi r4,0
- li r4,0
- bne 1f
- #endif
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
- .ifc \srr,srr
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- 1:
- #ifdef CONFIG_PPC_BOOK3S
- stb r4,PACASRR_VALID(r13)
- #endif
- .else
- mtspr SPRN_HSRR0,r11
- mtspr SPRN_HSRR1,r12
- 1:
- #ifdef CONFIG_PPC_BOOK3S
- stb r4,PACAHSRR_VALID(r13)
- #endif
- .endif
- DEBUG_SRR_VALID \srr
- BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
- FTR_SECTION_ELSE
- ldarx r0,0,r1
- ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- ld r3,_LINK(r1)
- ld r4,_CTR(r1)
- ld r5,_XER(r1)
- ld r6,_CCR(r1)
- li r0,0
- REST_GPRS(7, 12, r1)
- mtlr r3
- mtctr r4
- mtspr SPRN_XER,r5
- /*
- * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
- * the reliable stack unwinder later on. Clear it.
- */
- std r0,STACK_FRAME_OVERHEAD-16(r1)
- REST_GPRS(2, 5, r1)
- bne- cr1,1f /* emulate stack store */
- mtcr r6
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- .ifc \srr,srr
- RFI_TO_KERNEL
- .else
- HRFI_TO_KERNEL
- .endif
- b . /* prevent speculative execution */
- 1: /*
- * Emulate stack store with update. New r1 value was already calculated
- * and updated in our interrupt regs by emulate_loadstore, but we can't
- * store the previous value of r1 to the stack before re-loading our
- * registers from it, otherwise they could be clobbered. Use
- * PACA_EXGEN as temporary storage to hold the store data, as
- * interrupts are disabled here so it won't be clobbered.
- */
- mtcr r6
- std r9,PACA_EXGEN+0(r13)
- addi r9,r1,INT_FRAME_SIZE /* get original r1 */
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- std r9,0(r1) /* perform store component of stdu */
- ld r9,PACA_EXGEN+0(r13)
- .ifc \srr,srr
- RFI_TO_KERNEL
- .else
- HRFI_TO_KERNEL
- .endif
- b . /* prevent speculative execution */
- .Linterrupt_return_\srr\()_kernel_rst_end:
- #ifdef CONFIG_PPC_BOOK3S
- interrupt_return_\srr\()_kernel_restart:
- _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
- GET_PACA(r13)
- ld r1,PACA_EXIT_SAVE_R1(r13)
- LOAD_PACA_TOC()
- addi r3,r1,STACK_FRAME_OVERHEAD
- li r11,IRQS_ALL_DISABLED
- stb r11,PACAIRQSOFTMASK(r13)
- bl interrupt_exit_kernel_restart
- std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
- b .Linterrupt_return_\srr\()_kernel_rst_start
- 1:
- SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
- RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
- #endif
- .endm
- interrupt_return_macro srr
- #ifdef CONFIG_PPC_BOOK3S
- interrupt_return_macro hsrr
- .globl __end_soft_masked
- __end_soft_masked:
- DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
- #endif /* CONFIG_PPC_BOOK3S */
- #ifdef CONFIG_PPC_BOOK3S
- _GLOBAL(ret_from_fork_scv)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_vectored_common_exit
- #endif
- _GLOBAL(ret_from_fork)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_exit
- _GLOBAL(ret_from_kernel_thread)
- bl schedule_tail
- REST_NVGPRS(r1)
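- /*
- * copy_thread() put the thread function in r14 and its argument in r15;
- * ELFv2 callees also expect their entry address in r12 to set up the TOC.
- */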
- mtctr r14
- mr r3,r15
- #ifdef CONFIG_PPC64_ELF_ABI_V2
- mr r12,r14
- #endif
- bctrl
- li r3,0
- b .Lsyscall_exit