/* SPDX-License-Identifier: GPL-2.0 */
/*
 * File:	mca_asm.h
 * Purpose:	Machine check handling specific defines
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <[email protected]>
 * Copyright (C) Srinivasa Thirumalachar <[email protected]>
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <[email protected]>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <[email protected]>
 * Copyright (C) 2005 Silicon Graphics, Inc
 * Copyright (C) 2005 Keith Owens <[email protected]>
 */
  16. #ifndef _ASM_IA64_MCA_ASM_H
  17. #define _ASM_IA64_MCA_ASM_H
  18. #include <asm/percpu.h>
  19. #define PSR_IC 13
  20. #define PSR_I 14
  21. #define PSR_DT 17
  22. #define PSR_RT 27
  23. #define PSR_MC 35
  24. #define PSR_IT 36
  25. #define PSR_BN 44
  26. /*
  27. * This macro converts a instruction virtual address to a physical address
  28. * Right now for simulation purposes the virtual addresses are
  29. * direct mapped to physical addresses.
  30. * 1. Lop off bits 61 thru 63 in the virtual address
  31. */
  32. #define INST_VA_TO_PA(addr) \
  33. dep addr = 0, addr, 61, 3
  34. /*
  35. * This macro converts a data virtual address to a physical address
  36. * Right now for simulation purposes the virtual addresses are
  37. * direct mapped to physical addresses.
  38. * 1. Lop off bits 61 thru 63 in the virtual address
  39. */
  40. #define DATA_VA_TO_PA(addr) \
  41. tpa addr = addr
  42. /*
  43. * This macro converts a data physical address to a virtual address
  44. * Right now for simulation purposes the virtual addresses are
  45. * direct mapped to physical addresses.
  46. * 1. Put 0x7 in bits 61 thru 63.
  47. */
  48. #define DATA_PA_TO_VA(addr,temp) \
  49. mov temp = 0x7 ;; \
  50. dep addr = temp, addr, 61, 3
  51. #define GET_THIS_PADDR(reg, var) \
  52. mov reg = IA64_KR(PER_CPU_DATA);; \
  53. addl reg = THIS_CPU(var), reg
  54. /*
  55. * This macro jumps to the instruction at the given virtual address
  56. * and starts execution in physical mode with all the address
  57. * translations turned off.
  58. * 1. Save the current psr
  59. * 2. Make sure that all the upper 32 bits are off
  60. *
  61. * 3. Clear the interrupt enable and interrupt state collection bits
  62. * in the psr before updating the ipsr and iip.
  63. *
  64. * 4. Turn off the instruction, data and rse translation bits of the psr
  65. * and store the new value into ipsr
  66. * Also make sure that the interrupts are disabled.
  67. * Ensure that we are in little endian mode.
  68. * [psr.{rt, it, dt, i, be} = 0]
  69. *
  70. * 5. Get the physical address corresponding to the virtual address
  71. * of the next instruction bundle and put it in iip.
  72. * (Using magic numbers 24 and 40 in the deposint instruction since
  73. * the IA64_SDK code directly maps to lower 24bits as physical address
  74. * from a virtual address).
  75. *
  76. * 6. Do an rfi to move the values from ipsr to psr and iip to ip.
  77. */
  78. #define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
  79. mov old_psr = psr; \
  80. ;; \
  81. dep old_psr = 0, old_psr, 32, 32; \
  82. \
  83. mov ar.rsc = 0 ; \
  84. ;; \
  85. srlz.d; \
  86. mov temp2 = ar.bspstore; \
  87. ;; \
  88. DATA_VA_TO_PA(temp2); \
  89. ;; \
  90. mov temp1 = ar.rnat; \
  91. ;; \
  92. mov ar.bspstore = temp2; \
  93. ;; \
  94. mov ar.rnat = temp1; \
  95. mov temp1 = psr; \
  96. mov temp2 = psr; \
  97. ;; \
  98. \
  99. dep temp2 = 0, temp2, PSR_IC, 2; \
  100. ;; \
  101. mov psr.l = temp2; \
  102. ;; \
  103. srlz.d; \
  104. dep temp1 = 0, temp1, 32, 32; \
  105. ;; \
  106. dep temp1 = 0, temp1, PSR_IT, 1; \
  107. ;; \
  108. dep temp1 = 0, temp1, PSR_DT, 1; \
  109. ;; \
  110. dep temp1 = 0, temp1, PSR_RT, 1; \
  111. ;; \
  112. dep temp1 = 0, temp1, PSR_I, 1; \
  113. ;; \
  114. dep temp1 = 0, temp1, PSR_IC, 1; \
  115. ;; \
  116. dep temp1 = -1, temp1, PSR_MC, 1; \
  117. ;; \
  118. mov cr.ipsr = temp1; \
  119. ;; \
  120. LOAD_PHYSICAL(p0, temp2, start_addr); \
  121. ;; \
  122. mov cr.iip = temp2; \
  123. mov cr.ifs = r0; \
  124. DATA_VA_TO_PA(sp); \
  125. DATA_VA_TO_PA(gp); \
  126. ;; \
  127. srlz.i; \
  128. ;; \
  129. nop 1; \
  130. nop 2; \
  131. nop 1; \
  132. nop 2; \
  133. rfi; \
  134. ;;
  135. /*
  136. * This macro jumps to the instruction at the given virtual address
  137. * and starts execution in virtual mode with all the address
  138. * translations turned on.
  139. * 1. Get the old saved psr
  140. *
  141. * 2. Clear the interrupt state collection bit in the current psr.
  142. *
  143. * 3. Set the instruction translation bit back in the old psr
  144. * Note we have to do this since we are right now saving only the
  145. * lower 32-bits of old psr.(Also the old psr has the data and
  146. * rse translation bits on)
  147. *
  148. * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
  149. *
  150. * 5. Reset the current thread pointer (r13).
  151. *
  152. * 6. Set iip to the virtual address of the next instruction bundle.
  153. *
  154. * 7. Do an rfi to move ipsr to psr and iip to ip.
  155. */
  156. #define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
  157. mov temp2 = psr; \
  158. ;; \
  159. mov old_psr = temp2; \
  160. ;; \
  161. dep temp2 = 0, temp2, PSR_IC, 2; \
  162. ;; \
  163. mov psr.l = temp2; \
  164. mov ar.rsc = 0; \
  165. ;; \
  166. srlz.d; \
  167. mov r13 = ar.k6; \
  168. mov temp2 = ar.bspstore; \
  169. ;; \
  170. DATA_PA_TO_VA(temp2,temp1); \
  171. ;; \
  172. mov temp1 = ar.rnat; \
  173. ;; \
  174. mov ar.bspstore = temp2; \
  175. ;; \
  176. mov ar.rnat = temp1; \
  177. ;; \
  178. mov temp1 = old_psr; \
  179. ;; \
  180. mov temp2 = 1; \
  181. ;; \
  182. dep temp1 = temp2, temp1, PSR_IC, 1; \
  183. ;; \
  184. dep temp1 = temp2, temp1, PSR_IT, 1; \
  185. ;; \
  186. dep temp1 = temp2, temp1, PSR_DT, 1; \
  187. ;; \
  188. dep temp1 = temp2, temp1, PSR_RT, 1; \
  189. ;; \
  190. dep temp1 = temp2, temp1, PSR_BN, 1; \
  191. ;; \
  192. \
  193. mov cr.ipsr = temp1; \
  194. movl temp2 = start_addr; \
  195. ;; \
  196. mov cr.iip = temp2; \
  197. movl gp = __gp \
  198. ;; \
  199. DATA_PA_TO_VA(sp, temp1); \
  200. srlz.i; \
  201. ;; \
  202. nop 1; \
  203. nop 2; \
  204. nop 1; \
  205. rfi \
  206. ;;
  207. /*
  208. * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
  209. * stacks, except that the SAL/OS state and a switch_stack are stored near the
  210. * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as
  211. * well as MCA over INIT, each event needs its own SAL/OS state. All entries
  212. * are 16 byte aligned.
  213. *
  214. * +---------------------------+
  215. * | pt_regs |
  216. * +---------------------------+
  217. * | switch_stack |
  218. * +---------------------------+
  219. * | SAL/OS state |
  220. * +---------------------------+
  221. * | 16 byte scratch area |
  222. * +---------------------------+ <-------- SP at start of C MCA handler
  223. * | ..... |
  224. * +---------------------------+
  225. * | RBS for MCA/INIT handler |
  226. * +---------------------------+
  227. * | struct task for MCA/INIT |
  228. * +---------------------------+ <-------- Bottom of MCA/INIT stack
  229. */
  230. #define ALIGN16(x) ((x)&~15)
  231. #define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
  232. #define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
  233. #define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
  234. #define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16)
  235. #endif /* _ASM_IA64_MCA_ASM_H */