/* arch/m68k/kernel/entry.S */
/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */
  33. #include <linux/linkage.h>
  34. #include <asm/errno.h>
  35. #include <asm/setup.h>
  36. #include <asm/traps.h>
  37. #include <asm/unistd.h>
  38. #include <asm/asm-offsets.h>
  39. #include <asm/entry.h>
  40. .globl system_call, buserr, trap, resume
  41. .globl sys_call_table
  42. .globl __sys_fork, __sys_clone, __sys_vfork
  43. .globl bad_interrupt
  44. .globl auto_irqhandler_fixup
  45. .globl user_irqvec_fixup
  46. .text
  47. ENTRY(__sys_fork)
  48. SAVE_SWITCH_STACK
  49. jbsr sys_fork
  50. lea %sp@(24),%sp
  51. rts
  52. ENTRY(__sys_clone)
  53. SAVE_SWITCH_STACK
  54. pea %sp@(SWITCH_STACK_SIZE)
  55. jbsr m68k_clone
  56. lea %sp@(28),%sp
  57. rts
  58. ENTRY(__sys_vfork)
  59. SAVE_SWITCH_STACK
  60. jbsr sys_vfork
  61. lea %sp@(24),%sp
  62. rts
  63. ENTRY(__sys_clone3)
  64. SAVE_SWITCH_STACK
  65. pea %sp@(SWITCH_STACK_SIZE)
  66. jbsr m68k_clone3
  67. lea %sp@(28),%sp
  68. rts
  69. ENTRY(sys_sigreturn)
  70. SAVE_SWITCH_STACK
  71. movel %sp,%a1 | switch_stack pointer
  72. lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
  73. lea %sp@(-84),%sp | leave a gap
  74. movel %a1,%sp@-
  75. movel %a0,%sp@-
  76. jbsr do_sigreturn
  77. jra 1f | shared with rt_sigreturn()
  78. ENTRY(sys_rt_sigreturn)
  79. SAVE_SWITCH_STACK
  80. movel %sp,%a1 | switch_stack pointer
  81. lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
  82. lea %sp@(-84),%sp | leave a gap
  83. movel %a1,%sp@-
  84. movel %a0,%sp@-
  85. | stack contents:
  86. | [original pt_regs address] [original switch_stack address]
  87. | [gap] [switch_stack] [pt_regs] [exception frame]
  88. jbsr do_rt_sigreturn
  89. 1:
  90. | stack contents now:
  91. | [original pt_regs address] [original switch_stack address]
  92. | [unused part of the gap] [moved switch_stack] [moved pt_regs]
  93. | [replacement exception frame]
  94. | return value of do_{rt_,}sigreturn() points to moved switch_stack.
  95. movel %d0,%sp | discard the leftover junk
  96. RESTORE_SWITCH_STACK
  97. | stack contents now is just [syscall return address] [pt_regs] [frame]
  98. | return pt_regs.d0
  99. movel %sp@(PT_OFF_D0+4),%d0
  100. rts
  101. ENTRY(buserr)
  102. SAVE_ALL_INT
  103. GET_CURRENT(%d0)
  104. movel %sp,%sp@- | stack frame pointer argument
  105. jbsr buserr_c
  106. addql #4,%sp
  107. jra ret_from_exception
  108. ENTRY(trap)
  109. SAVE_ALL_INT
  110. GET_CURRENT(%d0)
  111. movel %sp,%sp@- | stack frame pointer argument
  112. jbsr trap_c
  113. addql #4,%sp
  114. jra ret_from_exception
  115. | After a fork we jump here directly from resume,
  116. | so that %d1 contains the previous task
  117. | schedule_tail now used regardless of CONFIG_SMP
  118. ENTRY(ret_from_fork)
  119. movel %d1,%sp@-
  120. jsr schedule_tail
  121. addql #4,%sp
  122. jra ret_from_exception
  123. ENTRY(ret_from_kernel_thread)
  124. | a3 contains the kernel thread payload, d7 - its argument
  125. movel %d1,%sp@-
  126. jsr schedule_tail
  127. movel %d7,(%sp)
  128. jsr %a3@
  129. addql #4,%sp
  130. jra ret_from_exception
  131. #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
  132. #ifdef TRAP_DBG_INTERRUPT
  133. .globl dbginterrupt
  134. ENTRY(dbginterrupt)
  135. SAVE_ALL_INT
  136. GET_CURRENT(%d0)
  137. movel %sp,%sp@- /* stack frame pointer argument */
  138. jsr dbginterrupt_c
  139. addql #4,%sp
  140. jra ret_from_exception
  141. #endif
  142. ENTRY(reschedule)
  143. /* save top of frame */
  144. pea %sp@
  145. jbsr set_esp0
  146. addql #4,%sp
  147. pea ret_from_exception
  148. jmp schedule
  149. ENTRY(ret_from_user_signal)
  150. moveq #__NR_sigreturn,%d0
  151. trap #0
  152. ENTRY(ret_from_user_rt_signal)
  153. movel #__NR_rt_sigreturn,%d0
  154. trap #0
  155. #else
  156. do_trace_entry:
  157. movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
  158. subql #4,%sp
  159. SAVE_SWITCH_STACK
  160. jbsr syscall_trace_enter
  161. RESTORE_SWITCH_STACK
  162. addql #4,%sp
  163. addql #1,%d0 | optimization for cmpil #-1,%d0
  164. jeq ret_from_syscall
  165. movel %sp@(PT_OFF_ORIG_D0),%d0
  166. cmpl #NR_syscalls,%d0
  167. jcs syscall
  168. jra ret_from_syscall
  169. badsys:
  170. movel #-ENOSYS,%sp@(PT_OFF_D0)
  171. jra ret_from_syscall
  172. do_trace_exit:
  173. subql #4,%sp
  174. SAVE_SWITCH_STACK
  175. jbsr syscall_trace_leave
  176. RESTORE_SWITCH_STACK
  177. addql #4,%sp
  178. jra .Lret_from_exception
  179. ENTRY(system_call)
  180. SAVE_ALL_SYS
  181. GET_CURRENT(%d1)
  182. movel %d1,%a1
  183. | save top of frame
  184. movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
  185. | syscall trace?
  186. tstb %a1@(TINFO_FLAGS+2)
  187. jmi do_trace_entry
  188. cmpl #NR_syscalls,%d0
  189. jcc badsys
  190. syscall:
  191. jbsr @(sys_call_table,%d0:l:4)@(0)
  192. movel %d0,%sp@(PT_OFF_D0) | save the return value
  193. ret_from_syscall:
  194. |oriw #0x0700,%sr
  195. movel %curptr@(TASK_STACK),%a1
  196. movew %a1@(TINFO_FLAGS+2),%d0
  197. jne syscall_exit_work
  198. 1: RESTORE_ALL
  199. syscall_exit_work:
  200. btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
  201. bnes 1b | if so, skip resched, signals
  202. lslw #1,%d0
  203. jcs do_trace_exit
  204. jmi do_delayed_trace
  205. lslw #8,%d0
  206. jne do_signal_return
  207. pea resume_userspace
  208. jra schedule
  209. ENTRY(ret_from_exception)
  210. .Lret_from_exception:
  211. btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
  212. bnes 1f | if so, skip resched, signals
  213. | only allow interrupts when we are really the last one on the
  214. | kernel stack, otherwise stack overflow can occur during
  215. | heavy interrupt load
  216. andw #ALLOWINT,%sr
  217. resume_userspace:
  218. movel %curptr@(TASK_STACK),%a1
  219. moveb %a1@(TINFO_FLAGS+3),%d0
  220. jne exit_work
  221. 1: RESTORE_ALL
  222. exit_work:
  223. | save top of frame
  224. movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
  225. lslb #1,%d0
  226. jne do_signal_return
  227. pea resume_userspace
  228. jra schedule
  229. do_signal_return:
  230. |andw #ALLOWINT,%sr
  231. subql #4,%sp | dummy return address
  232. SAVE_SWITCH_STACK
  233. pea %sp@(SWITCH_STACK_SIZE)
  234. bsrl do_notify_resume
  235. addql #4,%sp
  236. RESTORE_SWITCH_STACK
  237. addql #4,%sp
  238. jbra resume_userspace
  239. do_delayed_trace:
  240. bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
  241. pea 1 | send SIGTRAP
  242. movel %curptr,%sp@-
  243. pea LSIGTRAP
  244. jbsr send_sig
  245. addql #8,%sp
  246. addql #4,%sp
  247. jbra resume_userspace
  248. /* This is the main interrupt handler for autovector interrupts */
  249. ENTRY(auto_inthandler)
  250. SAVE_ALL_INT
  251. GET_CURRENT(%d0)
  252. | put exception # in d0
  253. bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
  254. subw #VEC_SPUR,%d0
  255. movel %sp,%sp@-
  256. movel %d0,%sp@- | put vector # on stack
  257. auto_irqhandler_fixup = . + 2
  258. jsr do_IRQ | process the IRQ
  259. addql #8,%sp | pop parameters off stack
  260. jra ret_from_exception
  261. /* Handler for user defined interrupt vectors */
  262. ENTRY(user_inthandler)
  263. SAVE_ALL_INT
  264. GET_CURRENT(%d0)
  265. | put exception # in d0
  266. bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
  267. user_irqvec_fixup = . + 2
  268. subw #VEC_USER,%d0
  269. movel %sp,%sp@-
  270. movel %d0,%sp@- | put vector # on stack
  271. jsr do_IRQ | process the IRQ
  272. addql #8,%sp | pop parameters off stack
  273. jra ret_from_exception
  274. /* Handler for uninitialized and spurious interrupts */
  275. ENTRY(bad_inthandler)
  276. SAVE_ALL_INT
  277. GET_CURRENT(%d0)
  278. movel %sp,%sp@-
  279. jsr handle_badint
  280. addql #4,%sp
  281. jra ret_from_exception
  282. resume:
  283. /*
  284. * Beware - when entering resume, prev (the current task) is
  285. * in a0, next (the new task) is in a1,so don't change these
  286. * registers until their contents are no longer needed.
  287. */
  288. /* save sr */
  289. movew %sr,%a0@(TASK_THREAD+THREAD_SR)
  290. /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
  291. movec %sfc,%d0
  292. movew %d0,%a0@(TASK_THREAD+THREAD_FC)
  293. /* save usp */
  294. /* it is better to use a movel here instead of a movew 8*) */
  295. movec %usp,%d0
  296. movel %d0,%a0@(TASK_THREAD+THREAD_USP)
  297. /* save non-scratch registers on stack */
  298. SAVE_SWITCH_STACK
  299. /* save current kernel stack pointer */
  300. movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
  301. /* save floating point context */
  302. #ifndef CONFIG_M68KFPU_EMU_ONLY
  303. #ifdef CONFIG_M68KFPU_EMU
  304. tstl m68k_fputype
  305. jeq 3f
  306. #endif
  307. fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
  308. #if defined(CONFIG_M68060)
  309. #if !defined(CPU_M68060_ONLY)
  310. btst #3,m68k_cputype+3
  311. beqs 1f
  312. #endif
  313. /* The 060 FPU keeps status in bits 15-8 of the first longword */
  314. tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
  315. jeq 3f
  316. #if !defined(CPU_M68060_ONLY)
  317. jra 2f
  318. #endif
  319. #endif /* CONFIG_M68060 */
  320. #if !defined(CPU_M68060_ONLY)
  321. 1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
  322. jeq 3f
  323. #endif
  324. 2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
  325. fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
  326. 3:
  327. #endif /* CONFIG_M68KFPU_EMU_ONLY */
  328. /* Return previous task in %d1 */
  329. movel %curptr,%d1
  330. /* switch to new task (a1 contains new task) */
  331. movel %a1,%curptr
  332. /* restore floating point context */
  333. #ifndef CONFIG_M68KFPU_EMU_ONLY
  334. #ifdef CONFIG_M68KFPU_EMU
  335. tstl m68k_fputype
  336. jeq 4f
  337. #endif
  338. #if defined(CONFIG_M68060)
  339. #if !defined(CPU_M68060_ONLY)
  340. btst #3,m68k_cputype+3
  341. beqs 1f
  342. #endif
  343. /* The 060 FPU keeps status in bits 15-8 of the first longword */
  344. tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
  345. jeq 3f
  346. #if !defined(CPU_M68060_ONLY)
  347. jra 2f
  348. #endif
  349. #endif /* CONFIG_M68060 */
  350. #if !defined(CPU_M68060_ONLY)
  351. 1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
  352. jeq 3f
  353. #endif
  354. 2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
  355. fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
  356. 3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
  357. 4:
  358. #endif /* CONFIG_M68KFPU_EMU_ONLY */
  359. /* restore the kernel stack pointer */
  360. movel %a1@(TASK_THREAD+THREAD_KSP),%sp
  361. /* restore non-scratch registers */
  362. RESTORE_SWITCH_STACK
  363. /* restore user stack pointer */
  364. movel %a1@(TASK_THREAD+THREAD_USP),%a0
  365. movel %a0,%usp
  366. /* restore fs (sfc,%dfc) */
  367. movew %a1@(TASK_THREAD+THREAD_FC),%a0
  368. movec %a0,%sfc
  369. movec %a0,%dfc
  370. /* restore status register */
  371. movew %a1@(TASK_THREAD+THREAD_SR),%sr
  372. rts
  373. #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */