entry_32.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *    Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"
/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */
/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
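
/*
 * KUEP (Kernel Userspace Execution Prevention) helpers, book3s/32 only.
 * On kernel entry, kuep_lock reloads the user segment registers from the
 * value cached at THREAD+THSR0 (which carries SR_NX, as the unlock path
 * below implies); on return to userspace, kuep_unlock clears SR_NX again
 * so user pages become executable once more.
 */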
#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
	.globl	__kuep_lock
__kuep_lock:
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr

__kuep_unlock:
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif
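
/*
 * System call entry, reached from the syscall prologue in head_32.h.
 * Register state assumed here, as implied by the stores below: r1 = kernel
 * stack frame, r9 = interrupted MSR, r10 = address of current->thread,
 * r11 = old r1, r0 = syscall number with r3-r8 its arguments, and LR = the
 * caller's link register (saved as regs->link).
 */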
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha	/* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,8(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	bl	system_call_exception
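
/*
 * Syscall exit. A non-zero r3 from syscall_exit_prepare() selects the slow
 * path at 3: below, which also restores the non-volatile GPRs, CTR and XER
 * (typically needed when the syscall was traced or the saved registers were
 * modified); the fast path restores only the volatile state.
 */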
ret_from_syscall:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b
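
/*
 * 476 icache workaround: when icache_44x_need_flush has been set (by the
 * 44x TLB handling code, as the flag's name suggests), invalidate the
 * instruction cache with iccci and clear the flag before returning to
 * userspace.
 */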
#ifdef CONFIG_PPC_47x
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_PPC_47x */
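
/*
 * Child return paths, entered via the switch frame set up by copy_thread():
 * ret_from_fork returns 0 to the new child through the normal syscall exit,
 * while ret_from_kernel_thread calls the function that copy_thread() stashed
 * in r14 with the argument from r15, then exits as if from a syscall.
 */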
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
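
/*
 * Return from an exception without going through the full interrupt_return
 * path. Expected state, matching the restores below: r11 points to the
 * exception frame, and r9/r12 hold the MSR/NIP values to be placed in
 * SRR1/SRR0.
 */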
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */
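
/*
 * Common interrupt/exception exit. MSR_PR in the saved MSR selects the user
 * or kernel return path; interrupt_exit_user_prepare() and
 * interrupt_exit_kernel_prepare() do the remaining C-level exit work (such
 * as reschedule and signal delivery for the user case) before the register
 * restore below.
 */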
	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return
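
/*
 * Kernel return path. A non-zero r3 from interrupt_exit_kernel_prepare()
 * indicates an emulated stwu to the stack whose store component still has
 * to be performed; that case is handled at 1: below (see the comment there).
 */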
.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */

#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
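
/*
 * RET_FROM_EXC_LEVEL restores the full register state saved by the
 * critical/debug/machine check prologues and returns with the level-specific
 * rfi variant; returns to user mode are rerouted through interrupt_return so
 * they get the normal exit processing.
 */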
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
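
/*
 * RESTORE_MMU_REGS reloads the MMU registers the exception prologue saved
 * (MAS0-MAS7 on e500, MMUCR on 44x), since the critical interrupt may have
 * hit in the middle of a TLB miss handler (see the comment above).
 */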
#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif
#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */
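
/*
 * On BookE, each higher exception level may interrupt a lower one, so the
 * prologues save the lower levels' save/restore register pairs; the debug
 * and machine check return paths therefore restore CSRR (and DSRR) in
 * addition to SRR0/SRR1 before issuing their own return instruction.
 */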
#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */