ctx_sw.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Vineetg: Aug 2009
 *  -"C" version of lowest level context switch asm macro called by scheduler
 *   gcc doesn't generate DWARF CFI info for hand written asm, hence can't
 *   backtrace out of it (e.g. tasks sleeping in kernel).
 *   So we cheat a bit by writing almost similar code in inline-asm.
 *  -This is a hacky way of doing things, but there is no other simple way.
 *   I don't want/intend to extend the unwinding code to understand raw asm
 */
#include <asm/asm-offsets.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>

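/*
 * Word offset of thread.ksp within task_struct: TASK_THREAD and THREAD_KSP
 * are byte offsets generated by asm-offsets, and the st.as/ld.as accesses
 * below scale their offset operand by 4 (word addressing).
 */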
#define KSP_WORD_OFF	((TASK_THREAD + THREAD_KSP) / 4)

struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
	unsigned int tmp;
	unsigned int prev = (unsigned int)prev_task;
	unsigned int next = (unsigned int)next_task;

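	/*
	 * Mirror of the hand written asm context switch:
	 *  1. push prev's callee-saved regs (r13-r25) on its kernel stack
	 *  2. save prev's SP in prev->thread.ksp
	 *  3. point _current_task (and optionally r25) at next
	 *  4. load next->thread.ksp into SP and pop next's callee regs
	 */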
	__asm__ __volatile__(
		/* FP/BLINK save generated by gcc (standard function prologue) */
		"st.a    r13, [sp, -4]   \n\t"
		"st.a    r14, [sp, -4]   \n\t"
		"st.a    r15, [sp, -4]   \n\t"
		"st.a    r16, [sp, -4]   \n\t"
		"st.a    r17, [sp, -4]   \n\t"
		"st.a    r18, [sp, -4]   \n\t"
		"st.a    r19, [sp, -4]   \n\t"
		"st.a    r20, [sp, -4]   \n\t"
		"st.a    r21, [sp, -4]   \n\t"
		"st.a    r22, [sp, -4]   \n\t"
		"st.a    r23, [sp, -4]   \n\t"
		"st.a    r24, [sp, -4]   \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG
		"st.a    r25, [sp, -4]   \n\t"
#else
		"sub     sp, sp, 4       \n\t"	/* usual r25 placeholder */
#endif

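		/*
		 * All callee-saved regs (r13-r25, or a placeholder slot for
		 * r25) are now on prev's kernel stack; SP points at the base
		 * of that save area.
		 */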
		/* set ksp of outgoing task in tsk->thread.ksp */
#if KSP_WORD_OFF <= 255
		"st.as   sp, [%3, %1]    \n\t"
#else
		/*
		 * Workaround for NR_CPUS=4k:
		 * %1 exceeds 255, the S9 limit of st.as' scaled offset, so
		 * form &prev->thread.ksp explicitly (add2 = base + offset * 4)
		 */
		"add2    r24, %3, %1     \n\t"
		"st      sp, [r24]       \n\t"
#endif

		/*
		 * setup _current_task with incoming tsk.
		 * optionally, set r25 to that as well
		 * For SMP extra work to get to &_current_task[cpu]
		 * (open coded SET_CURR_TASK_ON_CPU)
		 */
#ifndef CONFIG_SMP
		"st      %2, [@_current_task]     \n\t"
#else
		"lr      r24, [identity]          \n\t"	/* cpu id is in IDENTITY [15:8] */
		"lsr     r24, r24, 8              \n\t"
		"bmsk    r24, r24, 7              \n\t"
		"add2    r24, @_current_task, r24 \n\t"	/* &_current_task[cpu] */
		"st      %2, [r24]                \n\t"
#endif
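		/*
		 * With CONFIG_ARC_CURR_IN_REG, r25 is dedicated to caching
		 * "current", so repoint it at the incoming task here; it is
		 * skipped, not restored, when the callee reg file is popped
		 * below.
		 */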
#ifdef CONFIG_ARC_CURR_IN_REG
		"mov     r25, %2         \n\t"
#endif

		/* get ksp of incoming task from tsk->thread.ksp */
		"ld.as   sp, [%2, %1]    \n\t"

		/* start loading its CALLEE reg file */
#ifndef CONFIG_ARC_CURR_IN_REG
		"ld.ab   r25, [sp, 4]    \n\t"
#else
		"add     sp, sp, 4       \n\t"
#endif
		"ld.ab   r24, [sp, 4]    \n\t"
		"ld.ab   r23, [sp, 4]    \n\t"
		"ld.ab   r22, [sp, 4]    \n\t"
		"ld.ab   r21, [sp, 4]    \n\t"
		"ld.ab   r20, [sp, 4]    \n\t"
		"ld.ab   r19, [sp, 4]    \n\t"
		"ld.ab   r18, [sp, 4]    \n\t"
		"ld.ab   r17, [sp, 4]    \n\t"
		"ld.ab   r16, [sp, 4]    \n\t"
		"ld.ab   r15, [sp, 4]    \n\t"
		"ld.ab   r14, [sp, 4]    \n\t"
		"ld.ab   r13, [sp, 4]    \n\t"

		/* last (ret value) = prev : although for ARC it is effectively mov r0, r0 */
		"mov     %0, %3          \n\t"

		/* FP/BLINK restore generated by gcc (standard func epilogue) */
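		/*
		 * %0: tmp (return value), %1: KSP_WORD_OFF constant ("n"),
		 * %2: next, %3: prev. blink is marked clobbered so gcc's
		 * prologue/epilogue saves/restores it around the stack switch.
		 */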
  94. : "=r"(tmp)
  95. : "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
  96. : "blink"
  97. );
  98. return (struct task_struct *)tmp;
  99. }