genex.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

        __INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
        .set    push
        .set    noat
        mfc0    k1, CP0_CAUSE
        andi    k1, k1, 0x7c
#ifdef CONFIG_64BIT
        dsll    k1, k1, 1
#endif
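        /*
         * Cause.ExcCode sits in bits 6..2, so the andi above leaves
         * ExcCode * 4 in k1 -- a ready-made byte offset into a table of
         * 32-bit handler pointers. On 64-bit kernels the dsll doubles it
         * to index the table's 8-byte pointers instead.
         */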
        PTR_L   k0, exception_handlers(k1)
        jr      k0
        .set    pop
        END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
        .set    push
        .set    arch=r4000
        .set    noat
        mfc0    k1, CP0_CAUSE
        li      k0, 31<<2
        andi    k1, k1, 0x7c
        .set    push
        .set    noreorder
        .set    nomacro
        beq     k1, k0, handle_vced
         li     k0, 14<<2
        beq     k1, k0, handle_vcei
#ifdef CONFIG_64BIT
         dsll   k1, k1, 1
#endif
        .set    pop
        PTR_L   k0, exception_handlers(k1)
        jr      k0

        /*
         * Big shit, we now may have two dirty primary cache lines for the
         * same physical address. We can safely invalidate the line pointed
         * to by c0_badvaddr because after return from this exception handler
         * the load / store will be re-executed.
         */
handle_vced:
        MFC0    k0, CP0_BADVADDR
        li      k1, -4                          # Is this ...
        and     k0, k1                          # ... really needed?
        mtc0    zero, CP0_TAGLO
        cache   Index_Store_Tag_D, (k0)
        cache   Hit_Writeback_Inv_SD, (k0)
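        /*
         * With TagLo cleared, Index_Store_Tag_D invalidates the primary
         * D-cache line for the conflicting address; Hit_Writeback_Inv_SD
         * then writes back and invalidates the matching secondary line.
         */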
#ifdef CONFIG_PROC_FS
        PTR_LA  k0, vced_count
        lw      k1, (k0)
        addiu   k1, 1
        sw      k1, (k0)
#endif
        eret

handle_vcei:
        MFC0    k0, CP0_BADVADDR
        cache   Hit_Writeback_Inv_SD, (k0)      # also cleans pi
#ifdef CONFIG_PROC_FS
        PTR_LA  k0, vcei_count
        lw      k1, (k0)
        addiu   k1, 1
        sw      k1, (k0)
#endif
        eret
        .set    pop
        END(except_vec3_r4000)

        __FINIT

        .align  5       /* 32 byte rollback region */
LEAF(__r4k_wait)
        .set    push
        .set    noreorder
        /* start of rollback region */
        LONG_L  t0, TI_FLAGS($28)
        nop
        andi    t0, _TIF_NEED_RESCHED
        bnez    t0, 1f
         nop
        nop
        nop
#ifdef CONFIG_CPU_MICROMIPS
        nop
        nop
        nop
        nop
#endif
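        /*
         * The nops pad the region so the wait below is its final
         * instruction; the four extra ones are presumably needed because
         * microMIPS encodes them as 16-bit instructions.
         */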
        .set    MIPS_ISA_ARCH_LEVEL_RAW
        wait
        /* end of rollback region (the region size must be power of two) */
1:
        jr      ra
         nop
        .set    pop
        END(__r4k_wait)
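/*
 * If an interrupt lands anywhere inside the 32-byte region above, the
 * prologue below rounds EPC down to the start of the region, so the
 * _TIF_NEED_RESCHED test is re-run before the CPU goes back to sleep.
 * Without this rollback, a wakeup flagged between the test and the
 * wait instruction could be missed.
 */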
        .macro  BUILD_ROLLBACK_PROLOGUE handler
FEXPORT(rollback_\handler)
        .set    push
        .set    noat
        MFC0    k0, CP0_EPC
        PTR_LA  k1, __r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f
        bne     k0, k1, \handler
        MTC0    k0, CP0_EPC
        .set    pop
        .endm

        .align  5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
        .cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * Check to see if the interrupted code has just disabled
         * interrupts and ignore this interrupt for now if so.
         *
         * local_irq_disable() disables interrupts and then calls
         * trace_hardirqs_off() to track the state. If an interrupt is taken
         * after interrupts are disabled but before the state is updated,
         * it will appear to restore_all that it is incorrectly returning
         * with interrupts disabled.
         */
        .set    push
        .set    noat
        mfc0    k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
        and     k0, ST0_IEP
        bnez    k0, 1f

        mfc0    k0, CP0_EPC
        .set    noreorder
        j       k0
         rfe
#else
        and     k0, ST0_IE
        bnez    k0, 1f

        eret
#endif
1:
        .set    pop
#endif
        SAVE_ALL docfi=1
        CLI
        TRACE_IRQS_OFF

        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)

        /*
         * SAVE_ALL ensures we are using a valid kernel stack for the thread.
         * Check if we are already using the IRQ stack.
         */
        move    s1, sp  # Preserve the sp

        /* Get IRQ stack for this CPU */
        ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(irq_stack)
#else
        lui     k1, %highest(irq_stack)
        daddiu  k1, %higher(irq_stack)
        dsll    k1, 16
        daddiu  k1, %hi(irq_stack)
        dsll    k1, 16
#endif
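        /*
         * Without SYM32 the 64-bit address of irq_stack cannot fit in a
         * single lui, so it is assembled 16 bits at a time from
         * %highest/%higher/%hi; the final %lo is folded into the load
         * displacement below.
         */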
        LONG_SRL        k0, SMP_CPUID_PTRSHIFT
        LONG_ADDU       k1, k0
        LONG_L  t0, %lo(irq_stack)(k1)

        # Check if already on IRQ stack
        PTR_LI  t1, ~(_THREAD_SIZE-1)
        and     t1, t1, sp
        beq     t0, t1, 2f

        /* Switch to IRQ stack */
        li      t1, _IRQ_STACK_START
        PTR_ADD sp, t0, t1

        /* Save task's sp on IRQ stack so that unwinding can follow it */
        LONG_S  s1, 0(sp)
2:
        jal     plat_irq_dispatch

        /* Restore sp */
        move    sp, s1

        j       ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
        nop
#endif
        END(handle_int)

        __INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:      j       1b                      /* Dummy, will be replaced */
        END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
        j       ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
        nop
#endif
        END(except_vec_ejtag_debug)

        __FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
        SAVE_SOME docfi=1
        SAVE_AT docfi=1
        .set    push
        .set    noreorder
        PTR_LA  v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
        lui     v0, 0                   /* Patched */
        jr      v1
FEXPORT(except_vec_vi_ori)
         ori    v0, 0                   /* Patched */
        .set    pop
        END(except_vec_vi)
EXPORT(except_vec_vi_end)
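/*
 * The lui/ori immediates above are rewritten at run time so that each
 * copy of the vector loads the address of its C dispatcher into $v0,
 * which except_vec_vi_handler then invokes via jalr.
 */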
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
        SAVE_TEMP
        SAVE_STATIC
        CLI
#ifdef CONFIG_TRACE_IRQFLAGS
        move    s0, v0
        TRACE_IRQS_OFF
        move    v0, s0
#endif

        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)

        /*
         * SAVE_ALL ensures we are using a valid kernel stack for the thread.
         * Check if we are already using the IRQ stack.
         */
        move    s1, sp  # Preserve the sp

        /* Get IRQ stack for this CPU */
        ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(irq_stack)
#else
        lui     k1, %highest(irq_stack)
        daddiu  k1, %higher(irq_stack)
        dsll    k1, 16
        daddiu  k1, %hi(irq_stack)
        dsll    k1, 16
#endif
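        /* Same per-CPU irq_stack lookup and switch as in handle_int above. */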
        LONG_SRL        k0, SMP_CPUID_PTRSHIFT
        LONG_ADDU       k1, k0
        LONG_L  t0, %lo(irq_stack)(k1)

        # Check if already on IRQ stack
        PTR_LI  t1, ~(_THREAD_SIZE-1)
        and     t1, t1, sp
        beq     t0, t1, 2f

        /* Switch to IRQ stack */
        li      t1, _IRQ_STACK_START
        PTR_ADD sp, t0, t1

        /* Save task's sp on IRQ stack so that unwinding can follow it */
        LONG_S  s1, 0(sp)
2:
        jalr    v0

        /* Restore sp */
        move    sp, s1

        j       ret_from_irq
        END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        MTC0    k0, CP0_DESAVE
        mfc0    k0, CP0_DEBUG

        andi    k0, k0, MIPS_DEBUG_DBP  # Check for SDBBP.
        beqz    k0, ejtag_return

#ifdef CONFIG_SMP
1:      PTR_LA  k0, ejtag_debug_buffer_spinlock
        __SYNC(full, loongson3_war)
2:      ll      k0, 0(k0)
        bnez    k0, 2b
        PTR_LA  k0, ejtag_debug_buffer_spinlock
        sc      k0, 0(k0)
        beqz    k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
        sync
# endif
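        /*
         * Only k0/k1 are usable this early, so the lock word doubles as
         * its own lock value: sc stores the spinlock's address, and while
         * the lock is held "ll k0, 0(k0)" keeps reloading from that same
         * word; a zero read means the lock is free.
         */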
        PTR_LA  k0, ejtag_debug_buffer
        LONG_S  k1, 0(k0)

        ASM_CPUID_MFC0  k1, ASM_SMP_CPUID_REG
        PTR_SRL k1, SMP_CPUID_PTRSHIFT
        PTR_SLL k1, LONGLOG
        PTR_LA  k0, ejtag_debug_buffer_per_cpu
        PTR_ADDU        k0, k1

        PTR_LA  k1, ejtag_debug_buffer
        LONG_L  k1, 0(k1)
        LONG_S  k1, 0(k0)

        PTR_LA  k0, ejtag_debug_buffer_spinlock
        sw      zero, 0(k0)
#else
        PTR_LA  k0, ejtag_debug_buffer
        LONG_S  k1, 0(k0)
#endif

        SAVE_ALL
        move    a0, sp
        jal     ejtag_exception_handler
        RESTORE_ALL

#ifdef CONFIG_SMP
        ASM_CPUID_MFC0  k1, ASM_SMP_CPUID_REG
        PTR_SRL k1, SMP_CPUID_PTRSHIFT
        PTR_SLL k1, LONGLOG
        PTR_LA  k0, ejtag_debug_buffer_per_cpu
        PTR_ADDU        k0, k1
        LONG_L  k1, 0(k0)
#else
        PTR_LA  k0, ejtag_debug_buffer
        LONG_L  k1, 0(k0)
#endif

ejtag_return:
        back_to_back_c0_hazard
        MFC0    k0, CP0_DESAVE
        .set    mips32
        deret
        .set    pop
        END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
        .data
EXPORT(ejtag_debug_buffer)
        .fill   LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
        .fill   LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
        .fill   LONGSIZE * NR_CPUS
#endif
        .previous

        __INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
        j       nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
        nop
#endif
        END(except_vec_nmi)

        __FINIT

NESTED(nmi_handler, PT_SIZE, sp)
        .cfi_signal_frame
        .set    push
        .set    noat
        /*
         * Clear ERL - restore segment mapping
         * Clear BEV - required for page fault exception handler to work
         */
        mfc0    k0, CP0_STATUS
        ori     k0, k0, ST0_EXL
        li      k1, ~(ST0_BEV | ST0_ERL)
        and     k0, k0, k1
        mtc0    k0, CP0_STATUS
        _ehb
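        /*
         * ST0_EXL is set at the same time so the CPU stays in exception
         * mode (kernel privilege, interrupts masked) once ERL is cleared.
         */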
        SAVE_ALL
        move    a0, sp
        jal     nmi_exception_handler
        /* nmi_exception_handler never returns */
        .set    pop
        END(nmi_handler)

        .macro  __build_clear_none
        .endm

        .macro  __build_clear_sti
        TRACE_IRQS_ON
        STI
        .endm

        .macro  __build_clear_cli
        CLI
        TRACE_IRQS_OFF
        .endm

        .macro  __build_clear_fpe
        CLI
        TRACE_IRQS_OFF
        .set    push
        /* gas fails to assemble cfc1 for some archs (octeon). */
        .set    mips1
        SET_HARDFLOAT
        cfc1    a1, fcr31
        .set    pop
        .endm

        .macro  __build_clear_msa_fpe
        CLI
        TRACE_IRQS_OFF
        _cfcmsa a1, MSA_CSR
        .endm

        .macro  __build_clear_ade
        MFC0    t0, CP0_BADVADDR
        PTR_S   t0, PT_BVADDR(sp)
        KMODE
        .endm

        .macro  __build_clear_gsexc
        .set    push
        /*
         * We need to specify a selector to access the CP0.Diag1 (GSCause)
         * register. All GSExc-equipped processors have MIPS32.
         */
        .set    mips32
        mfc0    a1, CP0_DIAGNOSTIC1
        .set    pop
        TRACE_IRQS_ON
        STI
        .endm

        .macro  __BUILD_silent exception
        .endm

        /*
         * Gas tries to parse the ASM_PRINT argument as a string containing
         * string escapes and emits bogus warnings if it thinks it has
         * recognized an unknown escape code. So make the arguments
         * start with an n and gas will believe \n is ok ...
         */
        .macro  __BUILD_verbose nexception
        LONG_L  a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
        ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
        ASM_PRINT("Got \nexception at %016lx\012")
#endif
        .endm

        .macro  __BUILD_count exception
        LONG_L  t0, exception_count_\exception
        LONG_ADDIU      t0, 1
        LONG_S  t0, exception_count_\exception
        .comm   exception_count_\exception, 8, 8
        .endm

        .macro  __BUILD_HANDLER exception handler clear verbose ext
        .align  5
NESTED(handle_\exception, PT_SIZE, sp)
        .cfi_signal_frame
        .set    noat
        SAVE_ALL
FEXPORT(handle_\exception\ext)
        __build_clear_\clear
        .set    at
        __BUILD_\verbose \exception
        move    a0, sp
        jal     do_\handler
        j       ret_from_exception
        END(handle_\exception)
        .endm

        .macro  BUILD_HANDLER exception handler clear verbose
        __BUILD_HANDLER \exception \handler \clear \verbose _int
        .endm
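/*
 * Arguments below: exception name, C handler suffix (the macro calls
 * do_<handler>), pre-handler action (one of the __build_clear_* macros
 * above), and verbosity.
 */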
BUILD_HANDLER adel ade ade silent               /* #4  */
BUILD_HANDLER ades ade ade silent               /* #5  */
BUILD_HANDLER ibe be cli silent                 /* #6  */
BUILD_HANDLER dbe be cli silent                 /* #7  */
BUILD_HANDLER bp bp sti silent                  /* #9  */
BUILD_HANDLER ri ri sti silent                  /* #10 */
BUILD_HANDLER cpu cpu sti silent                /* #11 */
BUILD_HANDLER ov ov sti silent                  /* #12 */
BUILD_HANDLER tr tr sti silent                  /* #13 */
BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent    /* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
BUILD_HANDLER fpe fpe fpe silent                /* #15 */
#endif
BUILD_HANDLER ftlb ftlb none silent             /* #16 */
BUILD_HANDLER gsexc gsexc gsexc silent          /* #16 */
BUILD_HANDLER msa msa sti silent                /* #21 */
BUILD_HANDLER mdmx mdmx sti silent              /* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
/*
 * For watch, interrupts will be enabled after the watch
 * registers are read.
 */
BUILD_HANDLER watch watch cli silent            /* #23 */
#else
BUILD_HANDLER watch watch sti verbose           /* #23 */
#endif
BUILD_HANDLER mcheck mcheck cli verbose         /* #24 */
BUILD_HANDLER mt mt sti silent                  /* #25 */
BUILD_HANDLER dsp dsp sti silent                /* #26 */
BUILD_HANDLER reserved reserved sti verbose     /* others */

        .align  5
LEAF(handle_ri_rdhwr_tlbp)
        .set    push
        .set    noat
        .set    noreorder
        /* check if TLB contains an entry for EPC */
        MFC0    k1, CP0_ENTRYHI
        andi    k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1
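        /*
         * TLB entries map even/odd page pairs, so EPC is rounded down to
         * a double-page (VPN2) boundary before being merged with the
         * current ASID for the probe.
         */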
        or      k1, k0
        MTC0    k1, CP0_ENTRYHI
        mtc0_tlbw_hazard
        tlbp
        tlb_probe_hazard
        mfc0    k1, CP0_INDEX
        .set    pop
        bltz    k1, handle_ri   /* slow path */
        /* fall thru */
        END(handle_ri_rdhwr_tlbp)

LEAF(handle_ri_rdhwr)
        .set    push
        .set    noat
        .set    noreorder
        /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
        /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
        and     k0, k1, 1
        beqz    k0, 1f
         xor    k1, k0
        lhu     k0, (k1)
        lhu     k1, 2(k1)
        ins     k1, k0, 16, 16
        lui     k0, 0x007d
        b       docheck
         ori    k0, 0x6b3c
1:
        lui     k0, 0x7c03
        lw      k1, (k1)
        ori     k0, 0xe83b
#else
        andi    k0, k1, 1
        bnez    k0, handle_ri
         lui    k0, 0x7c03
        lw      k1, (k1)
        ori     k0, 0xe83b
#endif
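        /*
         * EPC bit 0 set means microMIPS mode: the instruction is fetched
         * as two halfwords to avoid a misaligned load. Either way, k1 now
         * holds the trapping instruction and k0 the expected rdhwr
         * encoding for the comparison below.
         */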
        .set    reorder
docheck:
        bne     k0, k1, handle_ri       /* if not ours */

isrdhwr:
        /* The insn is rdhwr. No need to check CAUSE.BD here. */
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
        MFC0    k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
        LONG_ADDIU      k0, 4
        jr      k0
         rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        LONG_ADDIU      k0, 4           /* stall on $k0 */
#else
        .set    at=v1
        LONG_ADDIU      k0, 4
        .set    noat
#endif
        MTC0    k0, CP0_EPC
        /* I hope three instructions between MTC0 and ERET are enough... */
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
        .set    push
        .set    arch=r4000
        eret
        .set    pop
#endif
        .set    pop
        END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */
        __INIT
        BUILD_HANDLER daddi_ov daddi_ov none silent    /* #12 */
#endif