/* arch/sparc/kernel/etrap_32.S */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * etrap.S: Sparc trap window preparation for entry into the
  4. * Linux kernel.
  5. *
  6. * Copyright (C) 1995 David S. Miller ([email protected])
  7. */
  8. #include <asm/head.h>
  9. #include <asm/asi.h>
  10. #include <asm/contregs.h>
  11. #include <asm/page.h>
  12. #include <asm/psr.h>
  13. #include <asm/ptrace.h>
  14. #include <asm/winmacro.h>
  15. #include <asm/asmmacro.h>
  16. #include <asm/thread_info.h>
  17. /* Registers to not touch at all. */
  18. #define t_psr l0 /* Set by caller */
  19. #define t_pc l1 /* Set by caller */
  20. #define t_npc l2 /* Set by caller */
  21. #define t_wim l3 /* Set by caller */
  22. #define t_twinmask l4 /* Set at beginning of this entry routine. */
  23. #define t_kstack l5 /* Set right before pt_regs frame is built */
  24. #define t_retpc l6 /* If you change this, change winmacro.h header file */
  25. #define t_systable l7 /* Never touch this, could be the syscall table ptr. */
  26. #define curptr g6 /* Set after pt_regs frame is built */
  27. .text
  28. .align 4
  29. /* SEVEN WINDOW PATCH INSTRUCTIONS */
  30. .globl tsetup_7win_patch1, tsetup_7win_patch2
  31. .globl tsetup_7win_patch3, tsetup_7win_patch4
  32. .globl tsetup_7win_patch5, tsetup_7win_patch6
  33. tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
  34. tsetup_7win_patch2: and %g2, 0x7f, %g2
  35. tsetup_7win_patch3: and %g2, 0x7f, %g2
  36. tsetup_7win_patch4: and %g1, 0x7f, %g1
  37. tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
  38. tsetup_7win_patch6: and %g2, 0x7f, %g2
  39. /* END OF PATCH INSTRUCTIONS */
  40. /* At trap time, interrupts and all generic traps do the
  41. * following:
  42. *
  43. * rd %psr, %l0
  44. * b some_handler
  45. * rd %wim, %l3
  46. * nop
  47. *
  48. * Then 'some_handler' if it needs a trap frame (ie. it has
  49. * to call c-code and the trap cannot be handled in-window)
  50. * then it does the SAVE_ALL macro in entry.S which does
  51. *
  52. * sethi %hi(trap_setup), %l4
  53. * jmpl %l4 + %lo(trap_setup), %l6
  54. * nop
  55. */
  56. /* 2 3 4 window number
  57. * -----
  58. * O T S mnemonic
  59. *
  60. * O == Current window before trap
  61. * T == Window entered when trap occurred
  62. * S == Window we will need to save if (1<<T) == %wim
  63. *
  64. * Before execution gets here, it must be guaranteed that
  65. * %l0 contains trap time %psr, %l1 and %l2 contain the
  66. * trap pc and npc, and %l3 contains the trap time %wim.
  67. */
  68. .globl trap_setup, tsetup_patch1, tsetup_patch2
  69. .globl tsetup_patch3, tsetup_patch4
  70. .globl tsetup_patch5, tsetup_patch6
  71. trap_setup:
  72. /* Calculate mask of trap window. See if from user
  73. * or kernel and branch conditionally.
  74. */
  75. mov 1, %t_twinmask
  76. andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
  77. be trap_setup_from_user ! nope, from user mode
  78. sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
  79. /* From kernel, allocate more kernel stack and
  80. * build a pt_regs trap frame.
  81. */
  82. sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
  83. STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
  84. /* See if we are in the trap window. */
  85. andcc %t_twinmask, %t_wim, %g0
  86. bne trap_setup_kernel_spill ! in trap window, clean up
  87. nop
  88. /* Trap from kernel with a window available.
  89. * Just do it...
  90. */
  91. jmpl %t_retpc + 0x8, %g0 ! return to caller
  92. mov %t_kstack, %sp ! jump onto new stack
  93. trap_setup_kernel_spill:
  94. ld [%curptr + TI_UWINMASK], %g1
  95. orcc %g0, %g1, %g0
  96. bne trap_setup_user_spill ! there are some user windows, yuck
  97. /* Spill from kernel, but only kernel windows, adjust
  98. * %wim and go.
  99. */
  100. srl %t_wim, 0x1, %g2 ! begin computation of new %wim
  101. tsetup_patch1:
  102. sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
  103. or %t_wim, %g2, %g2
  104. tsetup_patch2:
  105. and %g2, 0xff, %g2 ! patched on 7 window Sparcs
  106. save %g0, %g0, %g0
  107. /* Set new %wim value */
  108. wr %g2, 0x0, %wim
  109. /* Save the kernel window onto the corresponding stack. */
  110. STORE_WINDOW(sp)
  111. restore %g0, %g0, %g0
  112. jmpl %t_retpc + 0x8, %g0 ! return to caller
  113. mov %t_kstack, %sp ! and onto new kernel stack
  114. #define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
  115. trap_setup_from_user:
  116. /* We can't use %curptr yet. */
  117. LOAD_CURRENT(t_kstack, t_twinmask)
  118. sethi %hi(STACK_OFFSET), %t_twinmask
  119. or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
  120. add %t_kstack, %t_twinmask, %t_kstack
  121. mov 1, %t_twinmask
  122. sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
  123. /* Build pt_regs frame. */
  124. STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
  125. #if 0
  126. /* If we're sure every task_struct is THREAD_SIZE aligned,
  127. we can speed this up. */
  128. sethi %hi(STACK_OFFSET), %curptr
  129. or %curptr, %lo(STACK_OFFSET), %curptr
  130. sub %t_kstack, %curptr, %curptr
  131. #else
  132. sethi %hi(~(THREAD_SIZE - 1)), %curptr
  133. and %t_kstack, %curptr, %curptr
  134. #endif
  135. /* Clear current_thread_info->w_saved */
  136. st %g0, [%curptr + TI_W_SAVED]
  137. /* See if we are in the trap window. */
  138. andcc %t_twinmask, %t_wim, %g0
  139. bne trap_setup_user_spill ! yep we are
  140. orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
  141. /* Trap from user, but not into the invalid window.
  142. * Calculate new umask. The way this works is,
  143. * any window from the %wim at trap time until
  144. * the window right before the one we are in now,
  145. * is a user window. A diagram:
  146. *
  147. * 7 6 5 4 3 2 1 0 window number
  148. * ---------------
  149. * I L T mnemonic
  150. *
  151. * Window 'I' is the invalid window in our example,
  152. * window 'L' is the window the user was in when
  153. * the trap occurred, window T is the trap window
  154. * we are in now. So therefore, windows 5, 4 and
  155. * 3 are user windows. The following sequence
  156. * computes the user winmask to represent this.
  157. */
  158. subcc %t_wim, %t_twinmask, %g2
  159. bneg,a 1f
  160. sub %g2, 0x1, %g2
  161. 1:
  162. andn %g2, %t_twinmask, %g2
  163. tsetup_patch3:
  164. and %g2, 0xff, %g2 ! patched on 7win Sparcs
  165. st %g2, [%curptr + TI_UWINMASK] ! store new umask
  166. jmpl %t_retpc + 0x8, %g0 ! return to caller
  167. mov %t_kstack, %sp ! and onto kernel stack
  168. trap_setup_user_spill:
  169. /* A spill occurred from either kernel or user mode
  170. * and there exist some user windows to deal with.
  171. * A mask of the currently valid user windows
  172. * is in %g1 upon entry to here.
  173. */
  174. tsetup_patch4:
  175. and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
  176. srl %t_wim, 0x1, %g2 ! compute new %wim
  177. tsetup_patch5:
  178. sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
  179. or %t_wim, %g2, %g2 ! %g2 is new %wim
  180. tsetup_patch6:
  181. and %g2, 0xff, %g2 ! patched on 7win Sparcs
  182. andn %g1, %g2, %g1 ! clear this bit in %g1
  183. st %g1, [%curptr + TI_UWINMASK]
  184. save %g0, %g0, %g0
  185. wr %g2, 0x0, %wim
  186. /* Call MMU-architecture dependent stack checking
  187. * routine.
  188. */
  189. b tsetup_srmmu_stackchk
  190. andcc %sp, 0x7, %g0
  191. /* Architecture specific stack checking routines. When either
  192. * of these routines are called, the globals are free to use
  193. * as they have been safely stashed on the new kernel stack
  194. * pointer. Thus the definition below for simplicity.
  195. */
  196. #define glob_tmp g1
  197. .globl tsetup_srmmu_stackchk
  198. tsetup_srmmu_stackchk:
  199. /* Check results of callers andcc %sp, 0x7, %g0 */
  200. bne trap_setup_user_stack_is_bolixed
  201. sethi %hi(PAGE_OFFSET), %glob_tmp
  202. cmp %glob_tmp, %sp
  203. bleu,a 1f
  204. LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
  205. SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
  206. trap_setup_user_stack_is_bolixed:
  207. /* From user/kernel into invalid window w/bad user
  208. * stack. Save bad user stack, and return to caller.
  209. */
  210. SAVE_BOLIXED_USER_STACK(curptr, g3)
  211. restore %g0, %g0, %g0
  212. jmpl %t_retpc + 0x8, %g0
  213. mov %t_kstack, %sp
  214. 1:
  215. /* Clear the fault status and turn on the no_fault bit. */
  216. or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
  217. LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
  218. SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
  219. /* Dump the registers and cross fingers. */
  220. STORE_WINDOW(sp)
  221. /* Clear the no_fault bit and check the status. */
  222. andn %glob_tmp, 0x2, %glob_tmp
  223. LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
  224. SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
  225. mov AC_M_SFAR, %glob_tmp
  226. LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
  227. SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
  228. mov AC_M_SFSR, %glob_tmp
  229. LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
  230. SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
  231. andcc %glob_tmp, 0x2, %g0 ! did we fault?
  232. bne trap_setup_user_stack_is_bolixed ! failure
  233. nop
  234. restore %g0, %g0, %g0
  235. jmpl %t_retpc + 0x8, %g0
  236. mov %t_kstack, %sp