entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
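
/*
 * Without CONFIG_PREEMPTION there is no kernel-preemption work to do on the
 * return-to-kernel path, so resume_kernel collapses to restore_all via the
 * symbol alias below instead of carrying a dead code path.
 */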
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
        /*
         * If coming from userspace, preserve the user thread pointer and load
         * the kernel thread pointer. If we came from the kernel, the scratch
         * register will contain 0, and we should continue on the current TP.
         */
        csrrw tp, CSR_SCRATCH, tp
        bnez tp, _save_context

_restore_kernel_tpsp:
        csrr tp, CSR_SCRATCH
        REG_S sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
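        /*
         * A sketch of the overflow check below, assuming vmap'd kernel stacks
         * are THREAD_SIZE-sized and aligned to twice that: inside the stack,
         * bit THREAD_SHIFT of any address is clear, while in the guard region
         * directly below the stack it is set. After tentatively allocating
         * the pt_regs frame, shifting sp right by THREAD_SHIFT and testing
         * bit 0 therefore detects an overflow without needing a second
         * scratch register.
         */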
        addi sp, sp, -(PT_SIZE_ON_STACK)
        srli sp, sp, THREAD_SHIFT
        andi sp, sp, 0x1
        bnez sp, handle_kernel_stack_overflow
        REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

_save_context:
        REG_S sp, TASK_TI_USER_SP(tp)
        REG_L sp, TASK_TI_KERNEL_SP(tp)
        addi sp, sp, -(PT_SIZE_ON_STACK)
        REG_S x1,  PT_RA(sp)
        REG_S x3,  PT_GP(sp)
        REG_S x5,  PT_T0(sp)
        REG_S x6,  PT_T1(sp)
        REG_S x7,  PT_T2(sp)
        REG_S x8,  PT_S0(sp)
        REG_S x9,  PT_S1(sp)
        REG_S x10, PT_A0(sp)
        REG_S x11, PT_A1(sp)
        REG_S x12, PT_A2(sp)
        REG_S x13, PT_A3(sp)
        REG_S x14, PT_A4(sp)
        REG_S x15, PT_A5(sp)
        REG_S x16, PT_A6(sp)
        REG_S x17, PT_A7(sp)
        REG_S x18, PT_S2(sp)
        REG_S x19, PT_S3(sp)
        REG_S x20, PT_S4(sp)
        REG_S x21, PT_S5(sp)
        REG_S x22, PT_S6(sp)
        REG_S x23, PT_S7(sp)
        REG_S x24, PT_S8(sp)
        REG_S x25, PT_S9(sp)
        REG_S x26, PT_S10(sp)
        REG_S x27, PT_S11(sp)
        REG_S x28, PT_T3(sp)
        REG_S x29, PT_T4(sp)
        REG_S x30, PT_T5(sp)
        REG_S x31, PT_T6(sp)

        /*
         * Disable user-mode memory access as it should only be set in the
         * actual user copy routines.
         *
         * Disable the FPU to detect illegal usage of floating point in kernel
         * space.
         */
        li t0, SR_SUM | SR_FS

        REG_L s0, TASK_TI_USER_SP(tp)
        csrrc s1, CSR_STATUS, t0
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
        csrr s5, CSR_SCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_STATUS(sp)
        REG_S s2, PT_EPC(sp)
        REG_S s3, PT_BADADDR(sp)
        REG_S s4, PT_CAUSE(sp)
        REG_S s5, PT_TP(sp)

        /*
         * Set the scratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel.
         */
        csrw CSR_SCRATCH, x0

        /* Load the global pointer */
.option push
.option norelax
        la gp, __global_pointer$
.option pop
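        /*
         * Note: with linker relaxation enabled, the la above could itself be
         * relaxed into a gp-relative sequence, which would be bogus while gp
         * is still being reloaded; hence the norelax option around it.
         */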
#ifdef CONFIG_TRACE_IRQFLAGS
        call __trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING_USER
        /* If previous state is in user mode, call user_exit_callable(). */
        li a0, SR_PP
        and a0, s1, a0
        bnez a0, skip_context_tracking
        call user_exit_callable
skip_context_tracking:
#endif

        /*
         * The MSB of the cause register differentiates between interrupts and
         * exceptions: interrupts have it set, so they read as negative in the
         * signed compare below and fall through to the interrupt path.
         */
        bge s4, zero, 1f
        la ra, ret_from_exception

        /* Handle interrupts */
        move a0, sp /* pt_regs */
        la a1, generic_handle_arch_irq
        jr a1
1:
        /*
         * Exceptions run with interrupts enabled or disabled depending on the
         * state of SR_PIE in m/sstatus.
         */
        andi t0, s1, SR_PIE
        beqz t0, 1f
        /* kprobes, entered via ebreak, must have interrupts disabled. */
        li t0, EXC_BREAKPOINT
        beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
        call __trace_hardirqs_on
#endif
        csrs CSR_STATUS, SR_IE

1:
        la ra, ret_from_exception

        /* Handle syscalls */
        li t0, EXC_SYSCALL
        beq s4, t0, handle_syscall

        /* Handle other exceptions */
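        /*
         * Dispatch sketch: scause is scaled by the pointer size (1 <<
         * RISCV_LGPTR bytes per entry) to index excp_vect_table; cause values
         * at or beyond the table end fall back to do_trap_unknown.
         */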
        slli t0, s4, RISCV_LGPTR
        la t1, excp_vect_table
        la t2, excp_vect_table_end
        move a0, sp /* pt_regs */
        add t0, t1, t0
        /* Check if exception code lies within bounds */
        bgeu t0, t2, 1f
        REG_L t0, 0(t0)
        jr t0
1:
        tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
        /*
         * When running in M-Mode (no MMU config), MPIE does not get set.
         * As a result, we need to force-enable interrupts here because
         * handle_exception did not set SR_IE, as it always sees SR_PIE
         * being cleared.
         */
        csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
        /* Recover a0 - a7 for system calls */
        REG_L a0, PT_A0(sp)
        REG_L a1, PT_A1(sp)
        REG_L a2, PT_A2(sp)
        REG_L a3, PT_A3(sp)
        REG_L a4, PT_A4(sp)
        REG_L a5, PT_A5(sp)
        REG_L a6, PT_A6(sp)
        REG_L a7, PT_A7(sp)
#endif
        /* Save the initial a0 value (needed in signal handlers) */
        REG_S a0, PT_ORIG_A0(sp)
        /*
         * Advance SEPC to avoid executing the original ecall instruction on
         * sret; ecall is always a full 4-byte instruction (it has no
         * compressed form), so a fixed advance of 4 is safe.
         */
        addi s2, s2, 0x4
        REG_S s2, PT_EPC(sp)
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_WORK
        bnez t0, handle_syscall_trace_enter
check_syscall_nr:
        /* Check to make sure we don't jump to a bogus syscall number. */
        li t0, __NR_syscalls
        la s0, sys_ni_syscall
        /*
         * The syscall number is held in a7; if it is above the allowed range,
         * redirect to sys_ni_syscall.
         */
        bgeu a7, t0, 3f
#ifdef CONFIG_COMPAT
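        /*
         * The SR_UXL field of the status register gives the user-mode XLEN;
         * if it reads back as SR_UXL_32 the task is a 32-bit compat task, so
         * dispatch through compat_sys_call_table instead.
         */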
        REG_L s0, PT_STATUS(sp)
        srli s0, s0, SR_UXL_SHIFT
        andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
        li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
        sub t0, s0, t0
        bnez t0, 1f

        /* Call compat_syscall */
        la s0, compat_sys_call_table
        j 2f
1:
#endif
        /* Call syscall */
        la s0, sys_call_table
2:
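        /* Scale the syscall number to a table offset and fetch the handler. */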
        slli t0, a7, RISCV_LGPTR
        add s0, s0, t0
        REG_L s0, 0(s0)
3:
        jalr s0

ret_from_syscall:
        /* Set user a0 to kernel a0 */
        REG_S a0, PT_A0(sp)
        /*
         * We reach the label below directly when the actual syscall was never
         * executed: seccomp has already set the return value in the current
         * task's pt_regs (when configured with SECCOMP_RET_ERRNO/TRACE).
         */
ret_from_syscall_rejected:
#ifdef CONFIG_DEBUG_RSEQ
        move a0, sp
        call rseq_syscall
#endif
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_WORK
        bnez t0, handle_syscall_trace_exit

ret_from_exception:
        REG_L s0, PT_STATUS(sp)
        csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
        call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
        /* The MPP value is too large to be used as an immediate arg for addi */
        li t0, SR_MPP
        and s0, s0, t0
#else
        andi s0, s0, SR_SPP
#endif
        bnez s0, resume_kernel

        /* Interrupts must be disabled here so flags are checked atomically */
        REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
        andi s1, s0, _TIF_WORK_MASK
        bnez s1, resume_userspace_slow

resume_userspace:
#ifdef CONFIG_CONTEXT_TRACKING_USER
        call user_enter_callable
#endif

        /* Save unwound kernel stack pointer in thread_info */
        addi s0, sp, PT_SIZE_ON_STACK
        REG_S s0, TASK_TI_KERNEL_SP(tp)

        /*
         * Save TP into the scratch register, so we can find the kernel data
         * structures again.
         */
        csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
        REG_L s1, PT_STATUS(sp)
        andi t0, s1, SR_PIE
        beqz t0, 1f
        call __trace_hardirqs_on
        j 2f
1:
        call __trace_hardirqs_off
2:
#endif
        REG_L a0, PT_STATUS(sp)
        /*
         * The current load reservation is effectively part of the processor's
         * state, in the sense that load reservations cannot be shared between
         * different hart contexts. We can't actually save and restore a load
         * reservation, so instead here we clear any existing reservation --
         * it's always legal for implementations to clear load reservations at
         * any point (as long as the forward progress guarantee is kept, but
         * we'll ignore that here).
         *
         * Dangling load reservations can be the result of taking a trap in the
         * middle of an LR/SC sequence, but can also be the result of a taken
         * forward branch around an SC -- which is how we implement CAS. As a
         * result we need to clear reservations between the last CAS and the
         * jump back to the new context. While it is unlikely the store
         * completes, implementations are allowed to expand reservations to be
         * arbitrarily large.
         */
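        /*
         * The SC below need not succeed (there is no matching LR), but either
         * way it kills any dangling reservation; since it stores back the
         * value just loaded from PT_EPC, memory is unchanged.
         */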
        REG_L a2, PT_EPC(sp)
        REG_SC x0, a2, PT_EPC(sp)

        csrw CSR_STATUS, a0
        csrw CSR_EPC, a2

        REG_L x1,  PT_RA(sp)
        REG_L x3,  PT_GP(sp)
        REG_L x4,  PT_TP(sp)
        REG_L x5,  PT_T0(sp)
        REG_L x6,  PT_T1(sp)
        REG_L x7,  PT_T2(sp)
        REG_L x8,  PT_S0(sp)
        REG_L x9,  PT_S1(sp)
        REG_L x10, PT_A0(sp)
        REG_L x11, PT_A1(sp)
        REG_L x12, PT_A2(sp)
        REG_L x13, PT_A3(sp)
        REG_L x14, PT_A4(sp)
        REG_L x15, PT_A5(sp)
        REG_L x16, PT_A6(sp)
        REG_L x17, PT_A7(sp)
        REG_L x18, PT_S2(sp)
        REG_L x19, PT_S3(sp)
        REG_L x20, PT_S4(sp)
        REG_L x21, PT_S5(sp)
        REG_L x22, PT_S6(sp)
        REG_L x23, PT_S7(sp)
        REG_L x24, PT_S8(sp)
        REG_L x25, PT_S9(sp)
        REG_L x26, PT_S10(sp)
        REG_L x27, PT_S11(sp)
        REG_L x28, PT_T3(sp)
        REG_L x29, PT_T4(sp)
        REG_L x30, PT_T5(sp)
        REG_L x31, PT_T6(sp)

        REG_L x2,  PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
        mret
#else
        sret
#endif
#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
        REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
        bnez s0, restore_all
        REG_L s0, TASK_TI_FLAGS(tp)
        andi s0, s0, _TIF_NEED_RESCHED
        beqz s0, restore_all
        call preempt_schedule_irq
        j restore_all
#endif

resume_userspace_slow:
        /* Enter slow path for supplementary processing */
        move a0, sp /* pt_regs */
        move a1, s0 /* current_thread_info->flags */
        call do_work_pending
        j resume_userspace
/* Slow paths for ptrace. */
handle_syscall_trace_enter:
        move a0, sp
        call do_syscall_trace_enter
        move t0, a0
        REG_L a0, PT_A0(sp)
        REG_L a1, PT_A1(sp)
        REG_L a2, PT_A2(sp)
        REG_L a3, PT_A3(sp)
        REG_L a4, PT_A4(sp)
        REG_L a5, PT_A5(sp)
        REG_L a6, PT_A6(sp)
        REG_L a7, PT_A7(sp)
        bnez t0, ret_from_syscall_rejected
        j check_syscall_nr
handle_syscall_trace_exit:
        move a0, sp
        call do_syscall_trace_exit
        j ret_from_exception
#ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow:
        /*
         * Takes the pseudo-spinlock for the shadow stack, in case multiple
         * harts are concurrently overflowing their kernel stacks. We could
         * store any value here, but since we're overflowing the kernel stack
         * already we only have SP to use as a scratch register. So we just
         * swap in the address of the spinlock, as that's definitely non-zero.
         *
         * Pairs with a store_release in handle_bad_stack().
         */
1:      la sp, spin_shadow_stack
        REG_AMOSWAP_AQ sp, sp, (sp)
        bnez sp, 1b

        la sp, shadow_stack
        addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE

        /* Save caller registers to the shadow stack */
        addi sp, sp, -(PT_SIZE_ON_STACK)
        REG_S x1,  PT_RA(sp)
        REG_S x5,  PT_T0(sp)
        REG_S x6,  PT_T1(sp)
        REG_S x7,  PT_T2(sp)
        REG_S x10, PT_A0(sp)
        REG_S x11, PT_A1(sp)
        REG_S x12, PT_A2(sp)
        REG_S x13, PT_A3(sp)
        REG_S x14, PT_A4(sp)
        REG_S x15, PT_A5(sp)
        REG_S x16, PT_A6(sp)
        REG_S x17, PT_A7(sp)
        REG_S x28, PT_T3(sp)
        REG_S x29, PT_T4(sp)
        REG_S x30, PT_T5(sp)
        REG_S x31, PT_T6(sp)
        la ra, restore_caller_reg
        tail get_overflow_stack

restore_caller_reg:
        /* Save the per-CPU overflow stack pointer returned in a0 */
        REG_S a0, -8(sp)
        /* Restore caller registers from the shadow stack */
        REG_L x1,  PT_RA(sp)
        REG_L x5,  PT_T0(sp)
        REG_L x6,  PT_T1(sp)
        REG_L x7,  PT_T2(sp)
        REG_L x10, PT_A0(sp)
        REG_L x11, PT_A1(sp)
        REG_L x12, PT_A2(sp)
        REG_L x13, PT_A3(sp)
        REG_L x14, PT_A4(sp)
        REG_L x15, PT_A5(sp)
        REG_L x16, PT_A6(sp)
        REG_L x17, PT_A7(sp)
        REG_L x28, PT_T3(sp)
        REG_L x29, PT_T4(sp)
        REG_L x30, PT_T5(sp)
        REG_L x31, PT_T6(sp)

        /* Load the per-CPU overflow stack pointer */
        REG_L sp, -8(sp)
        addi sp, sp, -(PT_SIZE_ON_STACK)

        /* Save context to the overflow stack */
        REG_S x1,  PT_RA(sp)
        REG_S x3,  PT_GP(sp)
        REG_S x5,  PT_T0(sp)
        REG_S x6,  PT_T1(sp)
        REG_S x7,  PT_T2(sp)
        REG_S x8,  PT_S0(sp)
        REG_S x9,  PT_S1(sp)
        REG_S x10, PT_A0(sp)
        REG_S x11, PT_A1(sp)
        REG_S x12, PT_A2(sp)
        REG_S x13, PT_A3(sp)
        REG_S x14, PT_A4(sp)
        REG_S x15, PT_A5(sp)
        REG_S x16, PT_A6(sp)
        REG_S x17, PT_A7(sp)
        REG_S x18, PT_S2(sp)
        REG_S x19, PT_S3(sp)
        REG_S x20, PT_S4(sp)
        REG_S x21, PT_S5(sp)
        REG_S x22, PT_S6(sp)
        REG_S x23, PT_S7(sp)
        REG_S x24, PT_S8(sp)
        REG_S x25, PT_S9(sp)
        REG_S x26, PT_S10(sp)
        REG_S x27, PT_S11(sp)
        REG_S x28, PT_T3(sp)
        REG_S x29, PT_T4(sp)
        REG_S x30, PT_T5(sp)
        REG_S x31, PT_T6(sp)

        REG_L s0, TASK_TI_KERNEL_SP(tp)
        csrr s1, CSR_STATUS
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
        csrr s5, CSR_SCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_STATUS(sp)
        REG_S s2, PT_EPC(sp)
        REG_S s3, PT_BADADDR(sp)
        REG_S s4, PT_CAUSE(sp)
        REG_S s5, PT_TP(sp)
        move a0, sp
        tail handle_bad_stack
#endif
END(handle_exception)

ENTRY(ret_from_fork)
        la ra, ret_from_exception
        tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
        call schedule_tail
        /* Call fn(arg) */
        la ra, ret_from_exception
        move a0, s1
        jr s0
ENDPROC(ret_from_kernel_thread)

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The values of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
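/*
 * For reference, a sketch of the C-side declaration (as in asm/switch_to.h):
 *
 *   extern struct task_struct *__switch_to(struct task_struct *prev,
 *                                          struct task_struct *next);
 *
 * so prev arrives in a0 and next in a1, matching the comment above.
 */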
ENTRY(__switch_to)
        /* Save context into prev->thread */
        li a4, TASK_THREAD_RA
        add a3, a0, a4
        add a4, a1, a4
        REG_S ra,  TASK_THREAD_RA_RA(a3)
        REG_S sp,  TASK_THREAD_SP_RA(a3)
        REG_S s0,  TASK_THREAD_S0_RA(a3)
        REG_S s1,  TASK_THREAD_S1_RA(a3)
        REG_S s2,  TASK_THREAD_S2_RA(a3)
        REG_S s3,  TASK_THREAD_S3_RA(a3)
        REG_S s4,  TASK_THREAD_S4_RA(a3)
        REG_S s5,  TASK_THREAD_S5_RA(a3)
        REG_S s6,  TASK_THREAD_S6_RA(a3)
        REG_S s7,  TASK_THREAD_S7_RA(a3)
        REG_S s8,  TASK_THREAD_S8_RA(a3)
        REG_S s9,  TASK_THREAD_S9_RA(a3)
        REG_S s10, TASK_THREAD_S10_RA(a3)
        REG_S s11, TASK_THREAD_S11_RA(a3)
        /* Restore context from next->thread */
        REG_L ra,  TASK_THREAD_RA_RA(a4)
        REG_L sp,  TASK_THREAD_SP_RA(a4)
        REG_L s0,  TASK_THREAD_S0_RA(a4)
        REG_L s1,  TASK_THREAD_S1_RA(a4)
        REG_L s2,  TASK_THREAD_S2_RA(a4)
        REG_L s3,  TASK_THREAD_S3_RA(a4)
        REG_L s4,  TASK_THREAD_S4_RA(a4)
        REG_L s5,  TASK_THREAD_S5_RA(a4)
        REG_L s6,  TASK_THREAD_S6_RA(a4)
        REG_L s7,  TASK_THREAD_S7_RA(a4)
        REG_L s8,  TASK_THREAD_S8_RA(a4)
        REG_L s9,  TASK_THREAD_S9_RA(a4)
        REG_L s10, TASK_THREAD_S10_RA(a4)
        REG_L s11, TASK_THREAD_S11_RA(a4)
        /* The offset of thread_info in task_struct is zero. */
        move tp, a1
        ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

        .section ".rodata"
        .align LGREG
/* Exception vector table */
ENTRY(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
        ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
        RISCV_PTR do_trap_insn_illegal
        RISCV_PTR do_trap_break
        RISCV_PTR do_trap_load_misaligned
        RISCV_PTR do_trap_load_fault
        RISCV_PTR do_trap_store_misaligned
        RISCV_PTR do_trap_store_fault
        RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
        RISCV_PTR do_trap_ecall_s
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_trap_ecall_m
        /* instruction page fault */
        ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
        RISCV_PTR do_page_fault /* load page fault */
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
        li a7, __NR_rt_sigreturn
        scall
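        /* scall is the legacy assembler mnemonic for ecall; kept as-is here. */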
END(__user_rt_sigreturn)
#endif