entry.S
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <[email protected]>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <[email protected]>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <[email protected]>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <[email protected]>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/xilinx_mb_manager.h>

#include <linux/errno.h>
#include <asm/signal.h>
#include <asm/mmu.h>

#undef DEBUG

#ifdef DEBUG
/* Create space for syscalls counting. */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
	.space	(__NR_syscalls * 4)
#endif /* DEBUG */

#define C_ENTRY(name)	.globl name; .align 4; name
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow MSR ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	.endm
#endif
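
/*
 * Editor's note (illustrative, not part of the original file): in C terms,
 * each fallback macro above is a plain read-modify-write of rmsr through
 * r11, while the MSR-instruction variants do the same in a single
 * msrset/msrclr. A sketch with hypothetical mfs_rmsr()/mts_rmsr()
 * accessors:
 *
 *	static inline void disable_irq_sketch(void)
 *	{
 *		unsigned long msr = mfs_rmsr();	// mfs r11, rmsr
 *		msr &= ~MSR_IE;			// andi r11, r11, ~MSR_IE
 *		mts_rmsr(msr);			// mts rmsr, r11
 *	}
 */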
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:
#define SAVE_REGS \
	swi	r2, r1, PT_R2;	/* Save SDA */ \
	swi	r3, r1, PT_R3; \
	swi	r4, r1, PT_R4; \
	swi	r5, r1, PT_R5; \
	swi	r6, r1, PT_R6; \
	swi	r7, r1, PT_R7; \
	swi	r8, r1, PT_R8; \
	swi	r9, r1, PT_R9; \
	swi	r10, r1, PT_R10; \
	swi	r11, r1, PT_R11;	/* save clobbered regs after rval */ \
	swi	r12, r1, PT_R12; \
	swi	r13, r1, PT_R13;	/* Save SDA2 */ \
	swi	r14, r1, PT_PC;		/* PC, before IRQ/trap */ \
	swi	r15, r1, PT_R15;	/* Save LP */ \
	swi	r16, r1, PT_R16; \
	swi	r17, r1, PT_R17; \
	swi	r18, r1, PT_R18;	/* Save asm scratch reg */ \
	swi	r19, r1, PT_R19; \
	swi	r20, r1, PT_R20; \
	swi	r21, r1, PT_R21; \
	swi	r22, r1, PT_R22; \
	swi	r23, r1, PT_R23; \
	swi	r24, r1, PT_R24; \
	swi	r25, r1, PT_R25; \
	swi	r26, r1, PT_R26; \
	swi	r27, r1, PT_R27; \
	swi	r28, r1, PT_R28; \
	swi	r29, r1, PT_R29; \
	swi	r30, r1, PT_R30; \
	swi	r31, r1, PT_R31;	/* Save current task reg */ \
	mfs	r11, rmsr;		/* save MSR */ \
	swi	r11, r1, PT_MSR;

#define RESTORE_REGS_GP \
	lwi	r2, r1, PT_R2;	/* restore SDA */ \
	lwi	r3, r1, PT_R3; \
	lwi	r4, r1, PT_R4; \
	lwi	r5, r1, PT_R5; \
	lwi	r6, r1, PT_R6; \
	lwi	r7, r1, PT_R7; \
	lwi	r8, r1, PT_R8; \
	lwi	r9, r1, PT_R9; \
	lwi	r10, r1, PT_R10; \
	lwi	r11, r1, PT_R11;	/* restore clobbered regs after rval */ \
	lwi	r12, r1, PT_R12; \
	lwi	r13, r1, PT_R13;	/* restore SDA2 */ \
	lwi	r14, r1, PT_PC;		/* RESTORE_LINK PC, before IRQ/trap */ \
	lwi	r15, r1, PT_R15;	/* restore LP */ \
	lwi	r16, r1, PT_R16; \
	lwi	r17, r1, PT_R17; \
	lwi	r18, r1, PT_R18;	/* restore asm scratch reg */ \
	lwi	r19, r1, PT_R19; \
	lwi	r20, r1, PT_R20; \
	lwi	r21, r1, PT_R21; \
	lwi	r22, r1, PT_R22; \
	lwi	r23, r1, PT_R23; \
	lwi	r24, r1, PT_R24; \
	lwi	r25, r1, PT_R25; \
	lwi	r26, r1, PT_R26; \
	lwi	r27, r1, PT_R27; \
	lwi	r28, r1, PT_R28; \
	lwi	r29, r1, PT_R29; \
	lwi	r30, r1, PT_R30; \
	lwi	r31, r1, PT_R31;	/* Restore cur task reg */

#define RESTORE_REGS \
	lwi	r11, r1, PT_MSR; \
	mts	rmsr, r11; \
	RESTORE_REGS_GP

#define RESTORE_REGS_RTBD \
	lwi	r11, r1, PT_MSR; \
	andni	r11, r11, MSR_EIP;		/* clear EIP */ \
	ori	r11, r11, MSR_EE | MSR_BIP;	/* set EE and BIP */ \
	mts	rmsr, r11; \
	RESTORE_REGS_GP
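
/*
 * Editor's note (illustrative): SAVE_REGS/RESTORE_REGS_* fill and drain a
 * struct pt_regs frame on the kernel stack. A simplified sketch of the
 * layout the PT_* offsets index into (the real definition, which names
 * each register individually, lives in arch/microblaze/include/asm/ptrace.h):
 *
 *	struct pt_regs_sketch {
 *		unsigned long gpr[32];	// r0..r31 at PT_R0..PT_R31
 *		unsigned long pc;	// PT_PC: r14 (trap) or r17 (exception)
 *		unsigned long msr;	// PT_MSR
 *		unsigned long ear, esr, fsr;
 *		int pt_mode;		// PT_MODE: 0 = user, nonzero = kernel
 *	};
 */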
#define SAVE_STATE \
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
	/* See if already in kernel mode. */ \
	mfs	r1, rmsr; \
	andi	r1, r1, MSR_UMS; \
	bnei	r1, 1f; \
	/* Kernel-mode state save. */ \
	/* Reload kernel stack-ptr. */ \
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
	/* FIXME: these two lines could be merged into one: */ \
	/* tophys(r1,r1); */ \
	/* addik r1, r1, -PT_SIZE; */ \
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS \
	brid	2f; \
	swi	r1, r1, PT_MODE; \
1:	/* User-mode state save. */ \
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ \
	tophys(r1,r1); \
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */ \
	/* MS: these three instructions could be combined into one: */ \
	/* addik r1, r1, THREAD_SIZE; */ \
	/* tophys(r1,r1); */ \
	/* addik r1, r1, -PT_SIZE; */ \
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS \
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
	swi	r11, r1, PT_R1;		/* Store user SP. */ \
	swi	r0, r1, PT_MODE;	/* Was in user-mode. */ \
	/* MS: I am clearing UMS even when coming from kernel space */ \
	clear_ums; \
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
.text
.extern cpuinfo

C_ENTRY(mb_flush_dcache):
	addik	r1, r1, -PT_SIZE
	SAVE_REGS

	addik	r3, r0, cpuinfo
	lwi	r7, r3, CI_DCS
	lwi	r8, r3, CI_DCL
	sub	r9, r7, r8
1:
	wdc.flush r9, r0
	bgtid	r9, 1b
	addk	r9, r9, r8

	RESTORE_REGS
	addik	r1, r1, PT_SIZE
	rtsd	r15, 8
	nop

C_ENTRY(mb_invalidate_icache):
	addik	r1, r1, -PT_SIZE
	SAVE_REGS

	addik	r3, r0, cpuinfo
	lwi	r7, r3, CI_ICS
	lwi	r8, r3, CI_ICL
	sub	r9, r7, r8
1:
	wic	r9, r0
	bgtid	r9, 1b
	addk	r9, r9, r8

	RESTORE_REGS
	addik	r1, r1, PT_SIZE
	rtsd	r15, 8
	nop
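
/*
 * Editor's note (illustrative): both routines above walk the whole cache,
 * one line per iteration, using the size and line length published in
 * cpuinfo. Roughly, in C (wdc_flush() and the field names are assumptions
 * for the sketch):
 *
 *	void mb_flush_dcache_sketch(void)
 *	{
 *		unsigned int size = cpuinfo.dcache_size;	// CI_DCS
 *		unsigned int line = cpuinfo.dcache_line_length;	// CI_DCL
 *
 *		for (unsigned int addr = 0; addr < size; addr += line)
 *			wdc_flush(addr);	// one wdc.flush/wic per line
 *	}
 */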
/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice: it means we don't have to disable interrupts
 * (CLI) before the state save.
 */
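
/*
 * Editor's illustration of the protocol above from the user side (a
 * hedged sketch, not part of this file): a libc-style wrapper places the
 * syscall number in r12 and the arguments in r5..r10, then traps via
 * brki to vector 0x8 (see the .init.ivt table at the end of this file);
 * the result comes back in r3.
 *
 *	static inline long syscall1_sketch(long nr, long arg1)
 *	{
 *		register unsigned long r12 __asm__("r12") = nr;
 *		register unsigned long r5  __asm__("r5") = arg1;
 *		register unsigned long r3  __asm__("r3");
 *
 *		__asm__ __volatile__("brki r14, 0x8"
 *				     : "=r"(r3)
 *				     : "r"(r12), "r"(r5)
 *				     : "r14", "memory");
 *		return r3;
 *	}
 */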
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS
	swi	r0, r1, PT_R3
	swi	r0, r1, PT_R4

	swi	r0, r1, PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP. */
	clear_ums;
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PT_R0;
	tovirt(r1,r1)

	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called
	 * function should return. [note that MAKE_SYS_CALL uses label 1] */
	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PT_R5;
	lwi	r6, r1, PT_R6;
	lwi	r7, r1, PT_R7;
	lwi	r8, r1, PT_R8;
	lwi	r9, r1, PT_R9;
	lwi	r10, r1, PT_R10;
4:
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called
	 * function should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	blti	r12, 5f
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note: the MicroBlaze barrel shifter is optional, so don't rely on it */
	add	r12, r12, r12;	/* convert num -> ptr */
	add	r12, r12, r12;
	addi	r30, r0, 1	/* restarts allowed */

#ifdef DEBUG
	/* Trace syscalls and store counts in syscall_debug_table */
	/* The first table entry holds the total syscall count */
	lwi	r3, r0, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r0, syscall_debug_table
	lwi	r3, r12, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r12, syscall_debug_table
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	braid	ret_from_trap
	addi	r3, r0, -ENOSYS;
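
/*
 * Editor's sketch (illustrative) of the dispatch above: the bounds check
 * plus the two "add r12, r12, r12" instructions scale the syscall number
 * by 4 (the pointer size) without relying on the optional barrel shifter,
 * then index sys_call_table.
 *
 *	typedef long (*syscall_fn)(long, long, long, long, long, long);
 *	extern syscall_fn sys_call_table[];	// built from syscall_table.S
 *
 *	long dispatch_sketch(long nr, long a, long b, long c,
 *			     long d, long e, long f)
 *	{
 *		if (nr < 0 || nr >= __NR_syscalls)
 *			return -ENOSYS;		// label 5 above
 *		return sys_call_table[nr](a, b, c, d, e, f);
 *	}
 */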
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4

	lwi	r11, r1, PT_MODE;
	/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:
	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;	/* no signals or resume work pending, skip */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	add	r6, r30, r0;		/* Arg 2: int in_syscall */
	add	r30, r0, r0		/* no more restarts */
	bri	1b

/* Finally, return to user state. */
4:	set_bip;	/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;	/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from a trap */
	nop;
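
/*
 * Editor's note (illustrative): the flag checks above form the usual
 * return-to-user work loop. In C-like terms (a sketch; schedule(),
 * do_notify_resume() and the TIF flags are the real kernel symbols):
 *
 *	for (;;) {
 *		unsigned long flags = current_thread_info()->flags;
 *
 *		if (flags & _TIF_NEED_RESCHED) {
 *			schedule();		// then label 1 re-checks
 *			continue;
 *		}
 *		if (flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)) {
 *			do_notify_resume(regs, in_syscall);	// label 5
 *			continue;
 *		}
 *		break;		// label 4: restore state and return
 *	}
 */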
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall. This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */

C_ENTRY(ret_from_kernel_thread):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brald	r15, r20	/* fn was left in r20 */
	addk	r5, r0, r19	/* ... and its argument in r19 */

	brid	ret_from_trap
	add	r3, r0, r0

C_ENTRY(sys_rt_sigreturn_wrapper):
	addik	r30, r0, 0		/* no restarts */
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, 0;		/* add user context as 1st arg */
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * to find out where it happened */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* FIXME: this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr
	mfs	r7, rfsr;	/* save FSR */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception
	addik	r5, r1, 0	/* parameter struct pt_regs * regs */
/*
 * Unaligned data trap.
 *
 * An unaligned data access, including one on the last word of a 4k page,
 * is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice: it means we don't have to disable interrupts
 * before the state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save the r11 value and then restore it, because
	 * set_bip, clear_eip and set_ee use r11 as a temp register when the
	 * MSR instructions are not used. We don't need to do this when MSR
	 * instructions are used: they use r0 instead of r11.
	 * I am using ENTRY_SP, which should primarily be used only for
	 * stack pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE	/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr	/* ESR */
	mfs	r4, rear	/* EAR */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, 0	/* parameter struct pt_regs * regs */
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice: it means we don't have to disable interrupts
 * before the state save.
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
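
/*
 * Editor's sketch (illustrative) of how the two entry points below feed
 * do_page_fault():
 *
 *	// data-side fault: faulting address from EAR, error code from ESR
 *	do_page_fault(regs, ear, esr);
 *	// instruction-side fault: EAR holds the address, error code is 0
 *	do_page_fault(regs, ear, 0);
 */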
/* data and instruction traps - which one occurred is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE	/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear	/* parameter unsigned long address */
	mfs	r7, resr	/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0	/* parameter struct pt_regs * regs */

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE	/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear	/* parameter unsigned long address */
	ori	r7, r0, 0	/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0	/* parameter struct pt_regs * regs */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;	/* no signals or resume work pending, skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state. Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_notify_resume returns. */
	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state. */
4:	set_bip;	/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;	/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS_RTBD;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an exception */
	nop;

/*
 * HW EXCEPTION routine end
 */
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
	/* MS: we are in physical address space */
	/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: r1 now holds the physical address of the stack */
	/* save registers */
	/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, 0;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;

1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqid	r11, no_intr_resched
	/* Handle a signal return; Pending signals should be in r18. */
	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;

/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPTION
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

	/* interrupts are off, which is why we call preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
#ifdef CONFIG_MB_MANAGER

#define	PT_PID		PT_SIZE
#define	PT_TLBI		PT_SIZE + 4
#define	PT_ZPR		PT_SIZE + 8
#define	PT_TLBL0	PT_SIZE + 12
#define	PT_TLBH0	PT_SIZE + 16

C_ENTRY(_xtmr_manager_reset):
	lwi	r1, r0, xmb_manager_stackpointer

	/* Restore MSR */
	lwi	r2, r1, PT_MSR
	mts	rmsr, r2
	bri	4

	/* restore special purpose registers */
	lwi	r2, r1, PT_PID
	mts	rpid, r2

	lwi	r2, r1, PT_TLBI
	mts	rtlbx, r2

	lwi	r2, r1, PT_ZPR
	mts	rzpr, r2

#if CONFIG_XILINX_MICROBLAZE0_USE_FPU
	lwi	r2, r1, PT_FSR
	mts	rfsr, r2
#endif

	/* restore all the TLBs */
	addik	r3, r0, TOPHYS(tlb_skip)
	addik	r6, r0, PT_TLBL0
	addik	r7, r0, PT_TLBH0
restore_tlb:
	add	r6, r6, r1
	add	r7, r7, r1
	lwi	r2, r6, 0
	mts	rtlblo, r2
	lwi	r2, r7, 0
	mts	rtlbhi, r2
	addik	r6, r6, 4
	addik	r7, r7, 4
	bgtid	r3, restore_tlb
	addik	r3, r3, -1

	lwi	r5, r0, TOPHYS(xmb_manager_dev)
	lwi	r8, r0, TOPHYS(xmb_manager_reset_callback)
	set_vms
	/* return from reset: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_reset - 8
	rtbd	r8, 0
	nop

ret_from_reset:
	set_bip /* Ints masked for state restore */
	VM_OFF
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PT_R14
	lwi	r16, r1, PT_PC
	addik	r1, r1, PT_SIZE + 36
	rtbd	r16, 0
	nop
/*
 * Break handler for MB Manager. _xmb_manager_break is entered by
 * injecting a fault into one of the TMR MicroBlaze cores.
 * FIXME: This break handler supports getting
 * called from kernel space only.
 */
C_ENTRY(_xmb_manager_break):
	/*
	 * Reserve memory in the stack for context store/restore
	 * (which includes memory for storing tlbs (max two tlbs))
	 */
	addik	r1, r1, -PT_SIZE - 36
	swi	r1, r0, xmb_manager_stackpointer
	SAVE_REGS
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC;	/* PC and r16 are the same */

	lwi	r6, r0, TOPHYS(xmb_manager_baseaddr)
	lwi	r7, r0, TOPHYS(xmb_manager_crval)
	/*
	 * When the break vector gets asserted because of error injection,
	 * the break signal must be blocked before exiting from the
	 * break handler; the code below configures the TMR manager
	 * control register to block the break signal.
	 */
	swi	r7, r6, 0

	/* Save the special purpose registers */
	mfs	r2, rpid
	swi	r2, r1, PT_PID

	mfs	r2, rtlbx
	swi	r2, r1, PT_TLBI

	mfs	r2, rzpr
	swi	r2, r1, PT_ZPR

#if CONFIG_XILINX_MICROBLAZE0_USE_FPU
	mfs	r2, rfsr
	swi	r2, r1, PT_FSR
#endif
	mfs	r2, rmsr
	swi	r2, r1, PT_MSR

	/* Save all the TLBs */
	addik	r3, r0, TOPHYS(tlb_skip)
	addik	r6, r0, PT_TLBL0
	addik	r7, r0, PT_TLBH0
save_tlb:
	add	r6, r6, r1
	add	r7, r7, r1
	mfs	r2, rtlblo
	swi	r2, r6, 0
	mfs	r2, rtlbhi
	swi	r2, r7, 0
	addik	r6, r6, 4
	addik	r7, r7, 4
	bgtid	r3, save_tlb
	addik	r3, r3, -1
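
	/*
	 * Editor's note (illustrative): save_tlb above walks the TLB
	 * entries, reading each rtlblo/rtlbhi pair and storing it into the
	 * extra words reserved above pt_regs (PT_TLBL0/PT_TLBH0);
	 * restore_tlb in _xtmr_manager_reset is the mirror-image loop with
	 * mts. In C-like terms (mfs_rtlblo()/mfs_rtlbhi() and the entry
	 * count are assumptions for the sketch):
	 *
	 *	for (int i = nr_tlb_entries; i > 0; i--) {	// r3 counts down
	 *		*tlblo++ = mfs_rtlblo();
	 *		*tlbhi++ = mfs_rtlbhi();
	 *	}
	 */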
	lwi	r5, r0, TOPHYS(xmb_manager_dev)
	lwi	r8, r0, TOPHYS(xmb_manager_callback)
	/* return from break: needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_break - 8
	rtbd	r8, 0
	nop

ret_from_break:
	/* flush the d-cache */
	bralid	r15, mb_flush_dcache
	nop

	/*
	 * To make sure the MicroBlaze i-cache is in a proper state,
	 * invalidate it.
	 */
	bralid	r15, mb_invalidate_icache
	nop

	set_bip; /* Ints masked for state restore */
	VM_OFF;
	mbar	1
	mbar	2
	bri	4
	suspend
	nop
#endif
/*
 * Debug trap for KGDB. _debug_exception is entered via brki r16, 0x18,
 * and calls the handling function with saved pt_regs.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/

	/* BIP bit is set on entry, no interrupts can occur */
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC;	/* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PT_FSR;

	/* The stack pointer is a physical address and has been decreased
	 * by PT_SIZE, but we need to store the correct R1 value */
	addik	r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
	swi	r11, r1, PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, 0		/* pass pt_reg address as the first arg */
	addik	r15, r0, dbtrap_call;	/* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri	0

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE;	/* Make room on the stack. */
	SAVE_REGS;
	swi	r16, r1, PT_PC;		/* Save LP */
	swi	r0, r1, PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP. */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, 0;
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop
	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
1:
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	bri	1b

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;	/* no signals or resume work pending, skip */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall */
	bri	1b

/* Finally, return to user state. */
4:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS_RTBD
	addik	r1, r1, PT_SIZE		/* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS_RTBD
	lwi	r14, r1, PT_R14;
	lwi	r16, r1, PT_PC;
	addik	r1, r1, PT_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* get the start of cpu_context for the previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they were saved on the stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31 (CURRENT_TASK) to point to the task that runs next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it in CURRENT_SAVE too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get the new process' cpu context and restore it */
	/* get the start of the next task's cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12

	rtsd	r15, 8
	nop
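
/*
 * Editor's note (illustrative): _switch_to saves the callee-saved state
 * of the outgoing task into its thread_info->cpu_context and loads the
 * incoming task's context; volatile registers are deliberately skipped
 * because the C caller already saved them. The C-side contract (see
 * arch/microblaze/include/asm/switch_to.h) is:
 *
 *	extern struct task_struct *_switch_to(struct thread_info *prev,
 *					      struct thread_info *next);
 *	// prev arrives in r5, next in r6; the return value (r3) is the
 *	// previous task, i.e. the old CURRENT_TASK (r31)
 */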
#ifdef CONFIG_MB_MANAGER
.global xmb_inject_err
.section .text
.align 2
.ent xmb_inject_err
.type xmb_inject_err, @function
xmb_inject_err:
	addik	r1, r1, -PT_SIZE
	SAVE_REGS

	/* Switch to real mode */
	VM_OFF;
	set_bip;
	mbar	1
	mbar	2
	bralid	r15, XMB_INJECT_ERR_OFFSET
	nop;

	/* enable virtual mode */
	set_vms;
	/* barrier for instructions and data accesses */
	mbar	1
	mbar	2
	/*
	 * Enable Interrupts, Virtual Protected Mode, equalize
	 * initial state for all possible entries.
	 */
	rtbd	r0, 1f
	nop;
1:
	RESTORE_REGS
	addik	r1, r1, PT_SIZE
	rtsd	r15, 8;
	nop;
.end xmb_inject_err
.section .data
.global xmb_manager_dev
.global xmb_manager_baseaddr
.global xmb_manager_crval
.global xmb_manager_callback
.global xmb_manager_reset_callback
.global xmb_manager_stackpointer
.align 4
xmb_manager_dev:
	.long 0
xmb_manager_baseaddr:
	.long 0
xmb_manager_crval:
	.long 0
xmb_manager_callback:
	.long 0
xmb_manager_reset_callback:
	.long 0
xmb_manager_stackpointer:
	.long 0
/*
 * When the break vector gets asserted because of error injection,
 * the break signal must be blocked before exiting from the break
 * handler. The API below records the manager base address, the
 * control register value, and the callback arguments, which the
 * break handler uses to block the break and call the callback
 * function.
 */
.global xmb_manager_register
.section .text
.align 2
.ent xmb_manager_register
.type xmb_manager_register, @function
xmb_manager_register:
	swi	r5, r0, xmb_manager_baseaddr
	swi	r6, r0, xmb_manager_crval
	swi	r7, r0, xmb_manager_callback
	swi	r8, r0, xmb_manager_dev
	swi	r9, r0, xmb_manager_reset_callback

	rtsd	r15, 8;
	nop;
.end xmb_manager_register
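
/*
 * Editor's sketch of the registration call from C (the argument order
 * follows r5..r9 above; the exact prototype lives with the MB manager
 * driver and is assumed here):
 *
 *	xmb_manager_register(baseaddr, crval, break_callback,
 *			     dev, reset_callback);
 */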
#endif

ENTRY(_reset)
	VM_OFF
	brai	0; /* Jump to reset vector */

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
#if CONFIG_MANUAL_RESET_VECTOR && !defined(CONFIG_MB_MANAGER)
	.org	0x0
	brai	CONFIG_MANUAL_RESET_VECTOR
#elif defined(CONFIG_MB_MANAGER)
	.org	0x0
	brai	TOPHYS(_xtmr_manager_reset);
#endif
	.org	0x8
	brai	TOPHYS(_user_exception);	/* syscall handler */
	.org	0x10
	brai	TOPHYS(_interrupt);		/* Interrupt handler */
#ifdef CONFIG_MB_MANAGER
	.org	0x18
	brai	TOPHYS(_xmb_manager_break);	/* microblaze manager break handler */
#else
	.org	0x18
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
#endif
	.org	0x20
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
#ifdef CONFIG_MB_MANAGER
	/*
	 * The TMR Inject API, which injects the error, must
	 * execute from LMB.
	 * TMR Inject is programmed with the address 0x200 so that
	 * when the program counter matches this address, the error
	 * will be injected. 0x200 is expected to be the next
	 * available BRAM offset, hence it is used for this API.
	 */
	.org	XMB_INJECT_ERR_OFFSET
xmb_inject_error:
	nop
	rtsd	r15, 8
	nop
#endif
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"
/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If the return address lies in [start addr, end addr],
 * the unwinder displays 'string'
 */
.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0            ; .word 0               ; .word 0
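
/*
 * Editor's note (illustrative): the unwinder scans this table until the
 * zero terminator and prints the string for the first range containing
 * the return address. Roughly (field names follow
 * arch/microblaze/kernel/unwind.c and are assumptions for the sketch):
 *
 *	struct trap_handler_info {
 *		unsigned long start_addr, end_addr;
 *		const char *trap_name;
 *	};
 *	extern struct trap_handler_info microblaze_trap_handlers[];
 *
 *	for (info = microblaze_trap_handlers; info->start_addr; info++)
 *		if (addr >= info->start_addr && addr <= info->end_addr)
 *			return info->trap_name;
 */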