/* interrupt_64.S */

#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.align 7
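
/*
 * DEBUG_SRR_VALID compares the live SRR0/SRR1 (or HSRR0/HSRR1 when the
 * argument is anything other than "srr") against the _NIP/_MSR values
 * saved in the interrupt frame, masking off the low two address bits,
 * and emits a WARN-once trap on mismatch. It compiles away entirely
 * unless CONFIG_PPC_RFI_SRR_DEBUG is set.
 */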
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi	r11,r11,2
	clrrdi	r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi	r11,r11,2
	clrrdi	r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)
	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */
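	/*
	 * The "regshere" marker just stored tags this frame as holding a
	 * pt_regs area; the stack unwinder relies on it to recognise
	 * interrupt frames in backtraces (the kernel-return path below
	 * clears a stale marker for the same reason).
	 */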
BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */
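
	/*
	 * For reference, a minimal userspace sequence that enters here
	 * (an illustrative sketch, not part of this file; assumes
	 * __NR_getpid == 20 on powerpc64):
	 *
	 *	li	r0,20	# syscall number in r0
	 *	scv	0	# vectors to system_call_vectored_common
	 *
	 * On return r3 holds the result; under the scv ABI an error is a
	 * negative value in r3 rather than CR0.SO as with sc.
	 */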
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1			/* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13)	# clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	REST_GPR(0, r1)
	REST_GPRS(4, 8, r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPRS(9, 12)
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	REST_GPR(0, r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
.endm
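
/*
 * A sketch of the restart mechanism used above: SOFT_MASK_TABLE marks the
 * exit sequence as a soft-masked region, and RESTART_TABLE redirects any
 * interrupt taken within [rst_start, rst_end) to the _restart entry, so
 * an interrupt that becomes pending after syscall_exit_prepare is
 * replayed through syscall_exit_restart rather than being lost.
 */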

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0
#endif /* CONFIG_PPC_BOOK3S */

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_E500
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
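	/*
	 * A sketch of that rldimi, from the ISA definition (stated here
	 * as an aid, not from the original comments): with SH = 28 the
	 * insert mask covers only bit MB = 63 - 28, i.e. bit 28 of the
	 * low word, so inserting the zero held in r11 clears exactly
	 * CR0.SO and preserves the rest of the saved CR image in r12.
	 */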
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1			/* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif
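	/*
	 * Note on the hard-enable above, from the ISA (not the original
	 * comments): mtmsrd with L=1 alters only MSR[EE] and MSR[RI], so
	 * writing r12 = -1 enables both without disturbing any other MSR
	 * bits; wrteei 1 is the Book3E equivalent, affecting EE alone.
	 */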

	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0			/* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13)	# clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPR(0)
	ZEROIZE_GPRS(4, 12)
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	REST_GPR(0, r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0	/* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.	/* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif
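
/*
 * interrupt_return_macro generates the user and kernel interrupt-return
 * paths, parameterised by which save/restore register pair the handler
 * used: it is instantiated at the bottom of this file for srr (SRR0/1)
 * and, on Book3S, for hsrr (HSRR0/1).
 */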
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13)	# clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
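	/*
	 * A reading of the sequence below: PACA{H,}SRR_VALID tracks
	 * whether the live (H)SRR0/1 still hold the _NIP/_MSR values in
	 * the frame. When they do, the relatively expensive mtspr pair
	 * is skipped and only the valid flag is cleared.
	 */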
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
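	/*
	 * A reading of the feature section above: where stcx. does not
	 * check the reservation address, the dummy stdcx. to the stack
	 * kills any reservation userspace may still hold; where it does
	 * check the address, ldarx re-establishes the reservation on the
	 * kernel stack instead, so a later user-mode stcx. to another
	 * address cannot succeed spuriously.
	 */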
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0
	REST_GPRS(7, 13, r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6
	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled

	/*
	 * Returning to soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the return context.
	 *
	 * The MSR[EE] check catches among other things the short incoherency
	 * in hard_irq_disable() between clearing MSR[EE] and setting
	 * PACA_IRQ_HARD_DIS.
	 */
	ld	r12,_MSR(r1)
	andi.	r10,r12,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\()	// EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	bne	1f	// HARD_MASK is pending
	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	stb	r11,PACAIRQHAPPENED(r13)
	b	.Lfast_kernel_interrupt_return_\srr\()

1:	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/* Clear valid before changing _MSR */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
	xori	r12,r12,MSR_EE
	std	r12,_MSR(r1)
	b	.Lfast_kernel_interrupt_return_\srr\()

.Linterrupt_return_\srr\()_soft_enabled:
	/*
	 * In the soft-enabled case, we need to double-check that no
	 * pending interrupts came in before we reached the restart
	 * section of code, and restart the exit so those can be handled.
	 *
	 * If there are none, it is still possible that the interrupt
	 * left PACA_IRQ_HARD_DIS set, which needs to be cleared for the
	 * interrupted context. This clear will not clobber a new pending
	 * interrupt coming in, because we're in the restart section, so
	 * any such interrupt would return to the restart location.
	 */
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13)	// clear the possible HARD_DIS

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0
	REST_GPRS(7, 12, r1)
	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f	/* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
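	/*
	 * Illustrative scenario (an assumption for exposition, not from
	 * the original comments): the interrupted instruction was a
	 * kernel stack push such as
	 *
	 *	stdu	r1,-INT_FRAME_SIZE(r1)
	 *
	 * emulate_loadstore() already performed the update half (the new
	 * r1 is in the saved regs); the sequence below completes the
	 * store half once the frame contents have been consumed.
	 */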
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1)	/* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13)	/* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif
.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit
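
/*
 * ret_from_kernel_thread: r14 holds the thread function and r15 its
 * argument (registers set up by copy_thread() for kernel threads); the
 * ELFv2 ABI also requires r12 to hold the entry address at a function's
 * global entry point, hence the extra move below.
 */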
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit