exceptions-64e.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Boot code and exception vectors for Book3E processors
 *
 * Copyright (C) 2007 Ben. Herrenschmidt ([email protected]), IBM Corp.
 */
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>

/* 64e interrupt returns always use SRR registers */
#define fast_interrupt_return fast_interrupt_return_srr
#define interrupt_return interrupt_return_srr

/* XXX This will ultimately add space for a special exception save
 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 * when taking special interrupts. For now we don't support that,
 * special interrupts from within a non-standard level will probably
 * blow you up
 */
#define SPECIAL_EXC_SRR0	0
#define SPECIAL_EXC_SRR1	1
#define SPECIAL_EXC_SPRG_GEN	2
#define SPECIAL_EXC_SPRG_TLB	3
#define SPECIAL_EXC_MAS0	4
#define SPECIAL_EXC_MAS1	5
#define SPECIAL_EXC_MAS2	6
#define SPECIAL_EXC_MAS3	7
#define SPECIAL_EXC_MAS6	8
#define SPECIAL_EXC_MAS7	9
#define SPECIAL_EXC_MAS5	10	/* E.HV only */
#define SPECIAL_EXC_MAS8	11	/* E.HV only */
#define SPECIAL_EXC_IRQHAPPENED	12
#define SPECIAL_EXC_DEAR	13
#define SPECIAL_EXC_ESR		14
#define SPECIAL_EXC_SOFTE	15
#define SPECIAL_EXC_CSRR0	16
#define SPECIAL_EXC_CSRR1	17
/* must be even to keep 16-byte stack alignment */
#define SPECIAL_EXC_END		18

#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
#define SPECIAL_EXC_FRAME_OFFS	(INT_FRAME_SIZE - 288)
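
/*
 * Each SPECIAL_EXC_* value above is a doubleword index into this save
 * area; the STORE/LOAD helpers below turn it into an offset from r1
 * (SPECIAL_EXC_FRAME_OFFS places the area within the enlarged frame).
 */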
#define SPECIAL_EXC_STORE(reg, name) \
	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

#define SPECIAL_EXC_LOAD(reg, name) \
	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
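
/*
 * Save the state that a nested TLB miss or another special-level
 * exception would clobber (SRRs, scratch SPRGs, MAS registers, DEAR,
 * ESR), and advance the TLB exception frame pointer. Called with "bl"
 * from the critical, machine check, watchdog and critical-doorbell
 * handlers below, after EXCEPTION_COMMON_* has built the stack frame;
 * it returns early if we interrupted userspace, since the extra stack
 * space only exists for kernel-mode interrupts.
 */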
special_reg_save:
	/*
	 * We only need (or have stack space) to save this stuff if
	 * we interrupted the kernel.
	 */
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	bnelr

	/*
	 * Advance to the next TLB exception frame for handler
	 * types that don't do it automatically.
	 */
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	add	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * Save registers needed to allow nesting of certain exceptions
	 * (such as TLB misses) inside special exception levels
	 */
	mfspr	r10,SPRN_SRR0
	SPECIAL_EXC_STORE(r10,SRR0)
	mfspr	r10,SPRN_SRR1
	SPECIAL_EXC_STORE(r10,SRR1)
	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_GEN)
	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_TLB)
	mfspr	r10,SPRN_MAS0
	SPECIAL_EXC_STORE(r10,MAS0)
	mfspr	r10,SPRN_MAS1
	SPECIAL_EXC_STORE(r10,MAS1)
	mfspr	r10,SPRN_MAS2
	SPECIAL_EXC_STORE(r10,MAS2)
	mfspr	r10,SPRN_MAS3
	SPECIAL_EXC_STORE(r10,MAS3)
	mfspr	r10,SPRN_MAS6
	SPECIAL_EXC_STORE(r10,MAS6)
	mfspr	r10,SPRN_MAS7
	SPECIAL_EXC_STORE(r10,MAS7)
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_MAS5
	SPECIAL_EXC_STORE(r10,MAS5)
	mfspr	r10,SPRN_MAS8
	SPECIAL_EXC_STORE(r10,MAS8)

	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
	li	r10,0
	mtspr	SPRN_MAS5,r10
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	mfspr	r10,SPRN_DEAR
	SPECIAL_EXC_STORE(r10,DEAR)
	mfspr	r10,SPRN_ESR
	SPECIAL_EXC_STORE(r10,ESR)

	ld	r10,_NIP(r1)
	SPECIAL_EXC_STORE(r10,CSRR0)
	ld	r10,_MSR(r1)
	SPECIAL_EXC_STORE(r10,CSRR1)

	blr
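
/*
 * Undo what special_reg_save did: pop the TLB exception frame and
 * restore the saved SPRs. When returning to the kernel, non-bolted TLB
 * entries are also flushed, for the reason explained in the comment
 * below. User-mode interrupts take the normal interrupt_return path.
 */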
ret_from_level_except:
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	beq	1f
	REST_NVGPRS(r1)
	b	interrupt_return
1:
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	sub	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * It's possible that the special level exception interrupted a
	 * TLB miss handler, and inserted the same entry that the
	 * interrupted handler was about to insert. On CPUs without TLB
	 * write conditional, this can result in a duplicate TLB entry.
	 * Wipe all non-bolted entries to be safe.
	 *
	 * Note that this doesn't protect against any TLB misses
	 * we may take accessing the stack from here to the end of
	 * the special level exception. It's not clear how we can
	 * reasonably protect against that, but only CPUs with
	 * neither TLB write conditional nor bolted kernel memory
	 * are affected. Do any such CPUs even exist?
	 */
	PPC_TLBILX_ALL(0,R0)

	REST_NVGPRS(r1)

	SPECIAL_EXC_LOAD(r10,SRR0)
	mtspr	SPRN_SRR0,r10
	SPECIAL_EXC_LOAD(r10,SRR1)
	mtspr	SPRN_SRR1,r10
	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,MAS0)
	mtspr	SPRN_MAS0,r10
	SPECIAL_EXC_LOAD(r10,MAS1)
	mtspr	SPRN_MAS1,r10
	SPECIAL_EXC_LOAD(r10,MAS2)
	mtspr	SPRN_MAS2,r10
	SPECIAL_EXC_LOAD(r10,MAS3)
	mtspr	SPRN_MAS3,r10
	SPECIAL_EXC_LOAD(r10,MAS6)
	mtspr	SPRN_MAS6,r10
	SPECIAL_EXC_LOAD(r10,MAS7)
	mtspr	SPRN_MAS7,r10
BEGIN_FTR_SECTION
	SPECIAL_EXC_LOAD(r10,MAS5)
	mtspr	SPRN_MAS5,r10
	SPECIAL_EXC_LOAD(r10,MAS8)
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	SPECIAL_EXC_LOAD(r10,DEAR)
	mtspr	SPRN_DEAR,r10
	SPECIAL_EXC_LOAD(r10,ESR)
	mtspr	SPRN_ESR,r10

	stdcx.	r0,0,r1		/* to clear the reservation */

	REST_GPRS(2, 9, r1)

	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtctr	r10
	mtxer	r11

	blr
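
/*
 * Common exit sequence for the special levels. The level-specific
 * return instruction (rfci or rfmci) immediately follows each
 * instantiation below.
 */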
.macro ret_from_level srr0 srr1 paca_ex scratch
	bl	ret_from_level_except

	ld	r10,_LINK(r1)
	ld	r11,_CCR(r1)
	ld	r0,GPR13(r1)
	mtlr	r10
	mtcr	r11

	REST_GPRS(10, 12, r1)
	mtspr	\scratch,r0
	std	r10,\paca_ex+EX_R10(r13);
	std	r11,\paca_ex+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	mtspr	\srr0,r10
	mtspr	\srr1,r11
	ld	r10,\paca_ex+EX_R10(r13)
	ld	r11,\paca_ex+EX_R11(r13)
	mfspr	r13,\scratch
.endm

ret_from_crit_except:
	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
	rfci

ret_from_mc_except:
	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
	rfmci
/* Exception prolog code for all exceptions */
#define EXCEPTION_PROLOG(n, intnum, type, addition)			    \
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	mfcr	r10;			/* save CR */			    \
	mfspr	r11,SPRN_##type##_SRR1;	/* what are we coming from */	    \
	DO_KVM	intnum,SPRN_##type##_SRR1;	/* KVM hook */		    \
	stw	r10,PACA_EX##type+EX_CR(r13);	/* save old CR in the PACA */ \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13);	/* save old r1 in the PACA */ \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* coming from userspace? */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	type##_BTB_FLUSH						    \
	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
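
/*
 * For example, NORMAL_EXCEPTION_PROLOG(0x300, intnum, addition) below
 * expands this with type=GEN, so it uses SPRN_SPRG_GEN_SCRATCH, the
 * PACA_EXGEN save area and the SRR0/SRR1 pair, and a bad stack pointer
 * branches to exc_0x300_bad_stack.
 */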
/* Exception type-specific macros */
#define GEN_SET_KSTACK						    \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define SPRN_GEN_SRR0	SPRN_SRR0
#define SPRN_GEN_SRR1	SPRN_SRR1

#define GDBELL_SET_KSTACK	GEN_SET_KSTACK
#define SPRN_GDBELL_SRR0	SPRN_GSRR0
#define SPRN_GDBELL_SRR1	SPRN_GSRR1

#define CRIT_SET_KSTACK						    \
	ld	r1,PACA_CRIT_STACK(r13);			    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_CRIT_SRR0	SPRN_CSRR0
#define SPRN_CRIT_SRR1	SPRN_CSRR1

#define DBG_SET_KSTACK						    \
	ld	r1,PACA_DBG_STACK(r13);				    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_DBG_SRR0	SPRN_DSRR0
#define SPRN_DBG_SRR1	SPRN_DSRR1

#define MC_SET_KSTACK						    \
	ld	r1,PACA_MC_STACK(r13);				    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_MC_SRR0	SPRN_MCSRR0
#define SPRN_MC_SRR1	SPRN_MCSRR1

#define GEN_BTB_FLUSH				\
	START_BTB_FLUSH_SECTION			\
		beq 1f;				\
		BTB_FLUSH(r10)			\
		1:				\
	END_BTB_FLUSH_SECTION

#define CRIT_BTB_FLUSH				\
	START_BTB_FLUSH_SECTION			\
		BTB_FLUSH(r10)			\
	END_BTB_FLUSH_SECTION

#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
#define MC_BTB_FLUSH CRIT_BTB_FLUSH
#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH

#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))

#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))

#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))

#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))

#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))

/* Variants of the "addition" argument for the prolog
 */
#define PROLOG_ADDITION_NONE_GEN(n)
#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
	lbz	r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */	    \
	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */	    \
	bne	masked_interrupt_book3e_##n
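
/* The out-of-line masked_interrupt_book3e_##n targets are defined
 * towards the end of this file.
 */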
/*
 * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
 * called, because that does SAVE_NVGPRS which must see the original register
 * values, otherwise the scratch values might be restored when exiting the
 * interrupt.
 */
#define PROLOG_ADDITION_2REGS_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG(n)					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC(n)					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)
/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
exc_##n##_common:							    \
	SAVE_GPR(0, r1);		/* save r0 in stackframe */	    \
	SAVE_GPRS(2, 9, r1);		/* save r2 - r9 in stackframe */    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	beq	2f;			/* if from kernel mode */	    \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,scratch;		/* get back r13 */		    \
	SAVE_GPR(12, r1);		/* save r12 in stackframe */	    \
	LOAD_PACA_TOC();		/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */	    \
	LOAD_REG_IMMEDIATE(r12, STACK_FRAME_REGS_MARKER);		    \
	ZEROIZE_GPR(0);							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n);			/* regs.trap vector */		    \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* store orig r1 in stackframe */   \
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */	    \
	SAVE_NVGPRS(r1);
#define EXCEPTION_COMMON(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
#define EXCEPTION_COMMON_CRIT(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
#define EXCEPTION_COMMON_MC(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
#define EXCEPTION_COMMON_DBG(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)

/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */
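
/* bad_stack_book3e (below) reads PACA_TRAP_SAVE back to set regs->trap. */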
/* WARNING: If you change the layout of this stub, make sure you check
 *	the debug exception handler which handles single stepping
 *	into exceptions from userspace, and the MM code in
 *	arch/powerpc/mm/tlb_nohash.c which patches the branch here
 *	and would need to be updated if that branch is moved
 */
#define EXCEPTION_STUB(loc, label)					\
	. = interrupt_base_book3e + loc;				\
	nop;	/* To make debug interrupts happy */			\
	b	exc_##label##_book3e;

#define ACK_NONE(r)
#define ACK_DEC(r)							\
	lis	r,TSR_DIS@h;						\
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							\
	lis	r,TSR_FIS@h;						\
	mtspr	SPRN_TSR,r
/* Used by asynchronous interrupts that may happen in the idle loop.
 *
 * This checks whether the thread was in the idle loop and, if so, makes
 * the interrupt return to the caller rather than to the interrupted PC.
 * This avoids a race if the interrupt happens just before the wait
 * instruction.
 */
#define CHECK_NAPPING()							\
	ld	r11, PACA_THREAD_INFO(r13);				\
	ld	r10,TI_LOCAL_FLAGS(r11);				\
	andi.	r9,r10,_TLF_NAPPING;					\
	beq+	1f;							\
	ld	r8,_LINK(r1);						\
	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
	std	r8,_NIP(r1);						\
	std	r7,TI_LOCAL_FLAGS(r11);					\
1:
#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
	START_EXCEPTION(label);						\
	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
	EXCEPTION_COMMON(trapnum)					\
	ack(r8);							\
	CHECK_NAPPING();						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	interrupt_return
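
/*
 * For example, the decrementer below is wired up as:
 *   MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
 *                      decrementer, timer_interrupt, ACK_DEC)
 * which provides the exc_decrementer_book3e entry point used by the
 * stub table below, with a soft-mask check that diverts to
 * masked_interrupt_book3e_0x900 when irqs are disabled.
 */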
/*
 * And here we have the exception vectors!
 */
	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
	EXCEPTION_STUB(0x200, altivec_unavailable)
	EXCEPTION_STUB(0x220, altivec_assist)
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)
	EXCEPTION_STUB(0x340, lrat_error)

	.globl __end_interrupts
__end_interrupts:
/* Critical Input Interrupt */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x100)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_nmi_exception
	b	ret_from_crit_except

/* Machine Check Interrupt */
	START_EXCEPTION(machine_check);
	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_MC(0x000)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_mc_except

/* Data Storage Interrupt */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	std	r14,_DEAR(r1)
	std	r15,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	EXCEPTION_COMMON(0x300)
	b	storage_fault_common

/* Instruction Storage Interrupt */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
				PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	std	r14,_DEAR(r1)
	std	r15,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	EXCEPTION_COMMON(0x400)
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
			   external_input, do_IRQ, ACK_NONE)

/* Alignment */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	std	r14,_DEAR(r1)
	std	r15,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	EXCEPTION_COMMON(0x600)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	std	r14,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	EXCEPTION_COMMON(0x700)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Floating Point Unavailable Interrupt */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800)
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_fpu
	b	fast_interrupt_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	b	interrupt_return

/* Altivec Unavailable Interrupt */
	START_EXCEPTION(altivec_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x200)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_altivec
	b	fast_interrupt_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	interrupt_return

/* AltiVec Assist */
	START_EXCEPTION(altivec_assist);
	NORMAL_EXCEPTION_PROLOG(0x220,
				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x220)
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bl	altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	REST_NVGPRS(r1)
#else
	bl	unknown_exception
#endif
	b	interrupt_return

/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
			   decrementer, timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
			   fixed_interval, unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x9f0)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
	bl	WatchdogException
#else
	bl	unknown_nmi_exception
#endif
	b	ret_from_crit_except

/* System Call Interrupt */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common

/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return

/* Debug exception as a critical interrupt */
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			      PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */
	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r15)
	LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e)
	LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h	/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     yet properly save an interrupted kernel state
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash things up to make it look like we took a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	std	r14,_DSISR(r1)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	EXCEPTION_COMMON_CRIT(0xd00)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	DebugException
	REST_NVGPRS(r1)
	b	interrupt_return

kernel_dbg_exc:
	b	.	/* NYI */
/* Debug exception as a debug interrupt */
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			     PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */
	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r15)
	LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e)
	LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h	/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     yet properly save an interrupted kernel state
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash things up to make it look like we took a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	std	r14,_DSISR(r1)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	EXCEPTION_COMMON_DBG(0xd08)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	DebugException
	REST_NVGPRS(r1)
	b	interrupt_return

	START_EXCEPTION(perfmon);
	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x260)
	CHECK_NAPPING()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	/*
	 * XXX: Using interrupt_return to return from
	 * performance_monitor_exception taken as a soft-NMI (Linux irqs
	 * disabled) may be risky and could cause bugs in the return path or
	 * elsewhere. That case should just restore registers and return.
	 * There is a workaround for one known problem in
	 * interrupt_exit_kernel_prepare().
	 */
	bl	performance_monitor_exception
	b	interrupt_return

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
			   doorbell, doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2a0)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_nmi_exception
	b	ret_from_crit_except
/*
 * Guest doorbell interrupt
 * This general exception uses the GSRRx save/restore registers
 */
	START_EXCEPTION(guest_doorbell);
	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x2c0)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return

/* Guest Doorbell critical Interrupt */
	START_EXCEPTION(guest_doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2e0)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_nmi_exception
	b	ret_from_crit_except

/* Hypervisor call */
	START_EXCEPTION(hypercall);
	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x310)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return
/* Embedded Hypervisor privileged */
	START_EXCEPTION(ehpriv);
	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x320)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return

/* LRAT Error interrupt */
	START_EXCEPTION(lrat_error);
	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x340)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	interrupt_return
.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r11)
	LOAD_REG_ADDR_ALTTOC(r14, r11, __start___restart_table)
	LOAD_REG_ADDR_ALTTOC(r15, r11, __stop___restart_table)
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
#endif
300:
	cmpd	r14,r15
	beq	302f
	ld	r11,0(r14)
	cmpld	r10,r11
	blt	301f
	ld	r11,8(r14)
	cmpld	r10,r11
	bge	301f
	ld	r11,16(r14)
	b	303f
301:
	addi	r14,r14,24
	b	300b
302:
	li	r11,0
303:
.endm
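
/*
 * Each restart table entry is three doublewords: the start and end of
 * the region to match against the interrupted address (r10 here) and
 * the fixup address. On exit r11 holds the fixup address, or 0 if no
 * entry matched.
 */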
/*
 * An interrupt came in while soft-disabled; we mark paca->irq_happened
 * accordingly and, if the interrupt is level sensitive, we hard disable.
 * Hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
 * keep these in sync.
 */
.macro masked_interrupt_book3e paca_irq full_mask
	std	r14,PACA_EXGEN+EX_R14(r13)
	std	r15,PACA_EXGEN+EX_R15(r13)

	lbz	r10,PACAIRQHAPPENED(r13)
	.if \full_mask == 1
	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
	.else
	ori	r10,r10,\paca_irq
	.endif
	stb	r10,PACAIRQHAPPENED(r13)

	.if \full_mask == 1
	xori	r11,r11,MSR_EE		/* clear MSR_EE */
	mtspr	SPRN_SRR1,r11
	.endif

	mfspr	r10,SPRN_SRR0
	SEARCH_RESTART_TABLE
	cmpdi	r11,0
	beq	1f
	mtspr	SPRN_SRR0,r11		/* return to restart address */
1:

	lwz	r11,PACA_EXGEN+EX_CR(r13)
	mtcr	r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
	b	.
.endm

masked_interrupt_book3e_0x500:
	masked_interrupt_book3e PACA_IRQ_EE 1

masked_interrupt_book3e_0x900:
	ACK_DEC(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x980:
	ACK_FIT(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
	masked_interrupt_book3e PACA_IRQ_DBELL 0
/*
 * This is called from 0x300 and 0x400 handlers after the prologs with
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
storage_fault_common:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	b	interrupt_return

/*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
alignment_more:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/*
 * Trampolines used when spotting a bad kernel stack pointer in
 * the exception entry code.
 *
 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 * index around, etc... to handle crit & mcheck
 */
	BAD_STACK_TRAMPOLINE(0x000)
	BAD_STACK_TRAMPOLINE(0x100)
	BAD_STACK_TRAMPOLINE(0x200)
	BAD_STACK_TRAMPOLINE(0x220)
	BAD_STACK_TRAMPOLINE(0x260)
	BAD_STACK_TRAMPOLINE(0x280)
	BAD_STACK_TRAMPOLINE(0x2a0)
	BAD_STACK_TRAMPOLINE(0x2c0)
	BAD_STACK_TRAMPOLINE(0x2e0)
	BAD_STACK_TRAMPOLINE(0x300)
	BAD_STACK_TRAMPOLINE(0x310)
	BAD_STACK_TRAMPOLINE(0x320)
	BAD_STACK_TRAMPOLINE(0x340)
	BAD_STACK_TRAMPOLINE(0x400)
	BAD_STACK_TRAMPOLINE(0x500)
	BAD_STACK_TRAMPOLINE(0x600)
	BAD_STACK_TRAMPOLINE(0x700)
	BAD_STACK_TRAMPOLINE(0x800)
	BAD_STACK_TRAMPOLINE(0x900)
	BAD_STACK_TRAMPOLINE(0x980)
	BAD_STACK_TRAMPOLINE(0x9f0)
	BAD_STACK_TRAMPOLINE(0xa00)
	BAD_STACK_TRAMPOLINE(0xb00)
	BAD_STACK_TRAMPOLINE(0xc00)
	BAD_STACK_TRAMPOLINE(0xd00)
	BAD_STACK_TRAMPOLINE(0xd08)
	BAD_STACK_TRAMPOLINE(0xe00)
	BAD_STACK_TRAMPOLINE(0xf00)
	BAD_STACK_TRAMPOLINE(0xf20)

	.globl	bad_stack_book3e
bad_stack_book3e:
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13)	/* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13)	/* FIXME for crit & mcheck */
	std	r10,GPR1(r1)
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	std	r10,_DEAR(r1)
	std	r11,_ESR(r1)
	SAVE_GPR(0, r1);		/* save r0 in stackframe */
	SAVE_GPRS(2, 9, r1);		/* save r2 - r9 in stackframe */
	ld	r3,PACA_EXGEN+EX_R10(r13);	/* get back r10 */
	ld	r4,PACA_EXGEN+EX_R11(r13);	/* get back r11 */
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;	/* get back r13 XXX can be wrong */
	std	r3,GPR10(r1);		/* save r10 to stackframe */
	std	r4,GPR11(r1);		/* save r11 to stackframe */
	SAVE_GPR(12, r1);		/* save r12 in stackframe */
	std	r5,GPR13(r1);		/* save it to stackframe */
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_NVGPRS(r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	ZEROIZE_GPR(12)
	std	r12,0(r11)
	LOAD_PACA_TOC()
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b
/*
 * Setup the initial TLB for a core. This current implementation
 * assumes that whatever we are running from will not conflict with
 * the new mapping at PAGE_OFFSET.
 */
_GLOBAL(initial_tlb_book3e)

	/* Look for the first TLB with IPROT set */
	mfspr	r4,SPRN_TLB0CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(0)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB1CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(1)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB2CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(2)@h
	bne	found_iprot

	lis	r3,MAS0_TLBSEL(3)@h
	mfspr	r4,SPRN_TLB3CFG
	/* fall through */

found_iprot:
	andi.	r5,r4,TLBnCFG_HES
	bne	have_hes

	mflr	r8			/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bcl	20,31,$+4		/* Find our address */
invstr:	mflr	r6			/* Make it accessible */
	mfmsr	r7
	rlwinm	r5,r7,27,31,31		/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31		/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1		/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY	/* Extract # entries */
	li	r6,0			/* Set Entry counter to 0 */
1:	mr	r7,r3			/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv			/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r4			/* Are we done? */
	bne	1b			/* If not, repeat */

	/* Invalidate all TLBs */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync
/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* Find an unused, non-zero entry */
	addi	r7,r7,0x1
	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
	mtspr	SPRN_MAS0,r4
	tlbre
	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS	/* Setup TMP mapping in the other address space */
	mtspr	SPRN_MAS1,r6
	tlbwe

	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
	bcl	20,31,$+4	/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	rfi
2:

/* 4. Clear out PIDs & Search info
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	li	r6,0
	mtspr	SPRN_MAS6,r6
	mtspr	SPRN_PID,r6

/* 5. Invalidate mapping we started in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	mtspr	SPRN_MAS0,r3
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,31	/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r6
	tlbwe
	sync
	isync

/* 6. Setup KERNELBASE mapping in TLB[0]
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	rlwinm	r3,r3,0,16,3	/* clear ESEL */
	mtspr	SPRN_MAS0,r3
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r6

	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r6

	rlwinm	r5,r5,0,0,25
	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS3,r5
	li	r5,-1
	rlwinm	r5,r5,0,0,25

	tlbwe
/* 7. Jump to KERNELBASE mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 */
	/* Now we branch to the new virtual address mapped by this entry */
	bcl	20,31,$+4	/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	tovirt(r6,r6)
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */
2:

/* 8. Clear out the temp mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,2,31	/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r5
	tlbwe
	sync
	isync

	/* We translate LR and return */
	tovirt(r8,r8)
	mtlr	r8
	blr
have_hes:
	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
	 * kernel linear mapping. We also set MAS8 once and for all here,
	 * though that will eventually have to be made dependent on whether
	 * we are running under a hypervisor.
	 */

	/* BEWARE, MAGIC
	 * This code is called as an ordinary function on the boot CPU. But to
	 * avoid duplication, this code is also used in SCOM bringup of
	 * secondary CPUs. We read the code between the a2_tlbinit_code_start
	 * and a2_tlbinit_code_end labels one instruction at a time and RAM it
	 * into the new core via SCOM. That doesn't process branches, so there
	 * must be none between those two labels. It also means if this code
	 * ever takes any parameters, the SCOM code must also be updated to
	 * provide them.
	 */
	.globl a2_tlbinit_code_start
a2_tlbinit_code_start:

	ori	r11,r3,MAS0_WQ_ALLWAYS
	oris	r11,r11,MAS0_ESEL(3)@h	/* Use way 3: current bolted entry */
	mtspr	SPRN_MAS0,r11
	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
	mtspr	SPRN_MAS2,r3
	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS7_MAS3,r3
	li	r3,0
	mtspr	SPRN_MAS8,r3

	/* Write the TLB entry */
	tlbwe
	.globl a2_tlbinit_after_linear_map
a2_tlbinit_after_linear_map:

	/* Now we branch to the new virtual address mapped by this entry */
#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r5)
	LOAD_REG_ADDR_ALTTOC(r3, r5, 1f)
#else
	LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
#endif
	mtctr	r3
	bctr
1:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
	 * else (including IPROTed things left by firmware)
	 * r4 = TLBnCFG
	 * r3 = current address (more or less)
	 */

	li	r5,0
	mtspr	SPRN_MAS6,r5
	tlbsx	0,r3

	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY
	rlwinm	r10,r4,8,0xff
	addi	r10,r10,-1	/* Get inner loop mask */

	li	r3,1

	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))

	mfspr	r6,SPRN_MAS2
	rldicr	r6,r6,0,51	/* Extract EPN */

	mfspr	r7,SPRN_MAS0
	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */

	rlwinm	r8,r7,16,0xfff	/* Extract ESEL */

2:	add	r4,r3,r8
	and	r4,r4,r10
	rlwimi	r7,r4,16,MAS0_ESEL_MASK

	mtspr	SPRN_MAS0,r7
	mtspr	SPRN_MAS1,r5
	mtspr	SPRN_MAS2,r6
	tlbwe

	addi	r3,r3,1
	and.	r4,r3,r10

	bne	3f
	addis	r6,r6,(1<<30)@h
3:
	cmpw	r3,r9
	blt	2b

	.globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:

	PPC_TLBILX(0,0,R0)
	sync
	isync

	.globl a2_tlbinit_code_end
a2_tlbinit_code_end:

	/* We translate LR and return */
	mflr	r3
	tovirt(r3,r3)
	mtlr	r3
	blr
/*
 * Main entry (boot CPU, thread 0)
 *
 * We enter here from head_64.S, possibly after the prom_init trampoline
 * with r3 and r4 already saved to r31 and r30 respectively and in 64-bit
 * mode. Anything else is as it was left by the bootloader.
 *
 * Initial requirements of this port:
 *
 * - Kernel loaded at 0 physical
 * - A good lump of memory mapped 0:0 by UTLB entry 0
 * - MSR:IS & MSR:DS set to 0
 *
 * Note that some of the above requirements will be relaxed in the future
 * as the kernel becomes smarter at dealing with different initial conditions,
 * but for now you have to be careful.
 */
_GLOBAL(start_initialization_book3e)
	mflr	r28

	/* First, we need to setup some initial TLBs to map the kernel
	 * text, data and bss at PAGE_OFFSET. We don't have a real mode
	 * and always use AS 0, so we just set it up to match our link
	 * address and never use 0 based addresses.
	 */
	bl	initial_tlb_book3e

	/* Init global core bits */
	bl	init_core_book3e

	/* Init per-thread bits */
	bl	init_thread_book3e

	/* Return to common init code */
	tovirt(r28,r28)
	mtlr	r28
	blr
/*
 * Secondary core/processor entry
 *
 * This is entered for thread 0 of a secondary core, all other threads
 * are expected to be stopped. It's similar to start_initialization_book3e
 * except that it's generally entered from the holding loop in head_64.S
 * after CPUs have been gathered by Open Firmware.
 *
 * We assume we are in 32-bit mode running with whatever TLB entry was
 * set for us by the firmware or POR engine.
 */
_GLOBAL(book3e_secondary_core_init_tlb_set)
	li	r4,1
	b	generic_secondary_smp_init
_GLOBAL(book3e_secondary_core_init)
	mflr	r28

	/* Do we need to set up an initial TLB entry? */
	cmplwi	r4,0
	bne	2f

	/* Setup TLB for this core */
	bl	initial_tlb_book3e

	/* We can return from the above running at a different
	 * address, so recalculate r2 (TOC)
	 */
	bl	relative_toc

	/* Init global core bits */
2:	bl	init_core_book3e

	/* Init per-thread bits */
3:	bl	init_thread_book3e

	/* Return to common init code at proper virtual address.
	 *
	 * Due to various previous assumptions, we know we entered this
	 * function at either the final PAGE_OFFSET mapping or using a
	 * 1:1 mapping at 0, so we don't bother doing a complicated check
	 * here, we just ensure the return address has the right top bits.
	 *
	 * Note that if we ever want to be smarter about where we can be
	 * started from, we have to be careful that by the time we reach
	 * the code below we may already be running at a different location
	 * than the one we were called from since initial_tlb_book3e can
	 * have moved us already.
	 */
	cmpdi	cr0,r28,0
	blt	1f
	lis	r3,PAGE_OFFSET@highest
	sldi	r3,r3,32
	or	r28,r28,r3
1:	mtlr	r28
	blr

_GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b
	.globl init_core_book3e
init_core_book3e:
	/* Establish the interrupt vector base */
	tovirt(r2,r2)
	LOAD_REG_ADDR(r3, interrupt_base_book3e)
	mtspr	SPRN_IVPR,r3
	sync
	blr

init_thread_book3e:
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3

	/* Make sure interrupts are off */
	wrteei	0

	/* disable all timers and clear out status */
	li	r3,0
	mtspr	SPRN_TCR,r3
	mfspr	r3,SPRN_TSR
	mtspr	SPRN_TSR,r3

	blr
_GLOBAL(__setup_base_ivors)
	SET_IVOR(0, 0x020)	/* Critical Input */
	SET_IVOR(1, 0x000)	/* Machine Check */
	SET_IVOR(2, 0x060)	/* Data Storage */
	SET_IVOR(3, 0x080)	/* Instruction Storage */
	SET_IVOR(4, 0x0a0)	/* External Input */
	SET_IVOR(5, 0x0c0)	/* Alignment */
	SET_IVOR(6, 0x0e0)	/* Program */
	SET_IVOR(7, 0x100)	/* FP Unavailable */
	SET_IVOR(8, 0x120)	/* System Call */
	SET_IVOR(9, 0x140)	/* Auxiliary Processor Unavailable */
	SET_IVOR(10, 0x160)	/* Decrementer */
	SET_IVOR(11, 0x180)	/* Fixed Interval Timer */
	SET_IVOR(12, 0x1a0)	/* Watchdog Timer */
	SET_IVOR(13, 0x1c0)	/* Data TLB Error */
	SET_IVOR(14, 0x1e0)	/* Instruction TLB Error */
	SET_IVOR(15, 0x040)	/* Debug */

	sync

	blr

_GLOBAL(setup_altivec_ivors)
	SET_IVOR(32, 0x200)	/* AltiVec Unavailable */
	SET_IVOR(33, 0x220)	/* AltiVec Assist */
	blr

_GLOBAL(setup_perfmon_ivor)
	SET_IVOR(35, 0x260)	/* Performance Monitor */
	blr

_GLOBAL(setup_doorbell_ivors)
	SET_IVOR(36, 0x280)	/* Processor Doorbell */
	SET_IVOR(37, 0x2a0)	/* Processor Doorbell Crit */
	blr

_GLOBAL(setup_ehv_ivors)
	SET_IVOR(40, 0x300)	/* Embedded Hypervisor System Call */
	SET_IVOR(41, 0x320)	/* Embedded Hypervisor Privilege */
	SET_IVOR(38, 0x2c0)	/* Guest Processor Doorbell */
	SET_IVOR(39, 0x2e0)	/* Guest Processor Doorbell Crit/MC */
	blr

_GLOBAL(setup_lrat_ivor)
	SET_IVOR(42, 0x340)	/* LRAT Error */
	blr