/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
  16. .text
  17. .cfi_sections .debug_frame
  18. .align 5
  19. SYM_FUNC_START(handle_syscall)
  20. csrrd t0, PERCPU_BASE_KS
  21. la.abs t1, kernelsp
  22. add.d t1, t1, t0
  23. move t2, sp
  24. ld.d sp, t1, 0
  25. addi.d sp, sp, -PT_SIZE
  26. cfi_st t2, PT_R3
  27. cfi_rel_offset sp, PT_R3
  28. st.d zero, sp, PT_R0
  29. csrrd t2, LOONGARCH_CSR_PRMD
  30. st.d t2, sp, PT_PRMD
  31. csrrd t2, LOONGARCH_CSR_CRMD
  32. st.d t2, sp, PT_CRMD
  33. csrrd t2, LOONGARCH_CSR_EUEN
  34. st.d t2, sp, PT_EUEN
  35. csrrd t2, LOONGARCH_CSR_ECFG
  36. st.d t2, sp, PT_ECFG
  37. csrrd t2, LOONGARCH_CSR_ESTAT
  38. st.d t2, sp, PT_ESTAT
  39. cfi_st ra, PT_R1
  40. cfi_st a0, PT_R4
  41. cfi_st a1, PT_R5
  42. cfi_st a2, PT_R6
  43. cfi_st a3, PT_R7
  44. cfi_st a4, PT_R8
  45. cfi_st a5, PT_R9
  46. cfi_st a6, PT_R10
  47. cfi_st a7, PT_R11
  48. csrrd ra, LOONGARCH_CSR_ERA
  49. st.d ra, sp, PT_ERA
  50. cfi_rel_offset ra, PT_ERA
  51. cfi_st tp, PT_R2
  52. cfi_st u0, PT_R21
  53. cfi_st fp, PT_R22
  54. SAVE_STATIC
  55. move u0, t0
  56. li.d tp, ~_THREAD_MASK
  57. and tp, tp, sp
  58. move a0, sp
  59. bl do_syscall
  60. RESTORE_ALL_AND_RET
  61. SYM_FUNC_END(handle_syscall)
  62. SYM_CODE_START(ret_from_fork)
  63. bl schedule_tail # a0 = struct task_struct *prev
  64. move a0, sp
  65. bl syscall_exit_to_user_mode
  66. RESTORE_STATIC
  67. RESTORE_SOME
  68. RESTORE_SP_AND_RET
  69. SYM_CODE_END(ret_from_fork)
  70. SYM_CODE_START(ret_from_kernel_thread)
  71. bl schedule_tail # a0 = struct task_struct *prev
  72. move a0, s1
  73. jirl ra, s0, 0
  74. move a0, sp
  75. bl syscall_exit_to_user_mode
  76. RESTORE_STATIC
  77. RESTORE_SOME
  78. RESTORE_SP_AND_RET
  79. SYM_CODE_END(ret_from_kernel_thread)