/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

/*
 * Context-switch entry points: __switch_to_asm() is implemented in assembly
 * (struct inactive_task_frame's field order below must match that code),
 * __switch_to() is the C-level switch routine, and ret_from_fork() is where
 * a newly forked task first resumes execution.  Both switch routines return
 * the task that was previously running.
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
asmlinkage void ret_from_fork(void);
/*
 * This is the structure pointed to by thread.sp for an inactive task. The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	/* Registers saved/restored across the switch on x86-64: */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	/* 32-bit counterparts saved/restored across the switch: */
	unsigned long flags;
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together. They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;	/* return-address slot of the frame header */
};
/*
 * Stack layout of a freshly forked task: the switch frame unwound by
 * __switch_to_asm(), immediately followed by the child's pt_regs.
 * NOTE(review): presumably constructed at fork time (copy_thread) --
 * confirm against the process-setup code.
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
/*
 * Switch execution from task @prev to task @next.  @last is assigned the
 * return value of __switch_to_asm(): the task we switched away from, as
 * observed when this task is eventually scheduled back in.  Wrapped in
 * do/while (0) so the macro behaves as a single statement.
 */
#define switch_to(prev, next, last)					\
do {									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
  42. #ifdef CONFIG_X86_32
  43. static inline void refresh_sysenter_cs(struct thread_struct *thread)
  44. {
  45. /* Only happens when SEP is enabled, no need to test "SEP"arately: */
  46. if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
  47. return;
  48. this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
  49. wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
  50. }
  51. #endif
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_task_stack(struct task_struct *task)
{
	/* sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	/*
	 * 32-bit: Xen PV loads the task's sp0 directly; otherwise the entry
	 * stack pointer is cached in the per-CPU TSS sp1 slot.
	 */
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task->thread.sp0);
	else
		this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
	/* Xen PV enters the kernel on the thread stack. */
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
	/* Native 64-bit needs no update here: sp0 is the constant trampoline. */
#endif
}
/*
 * Prime a kernel thread's switch frame: stash the thread function in bx and
 * its argument in di (32-bit) or r12 (64-bit).  NOTE(review): presumably
 * ret_from_fork() picks these up to invoke fun(arg) -- confirm against the
 * entry assembly.
 */
static inline void kthread_frame_init(struct inactive_task_frame *frame,
				      int (*fun)(void *), void *arg)
{
	frame->bx = (unsigned long)fun;
#ifdef CONFIG_X86_32
	frame->di = (unsigned long)arg;
#else
	frame->r12 = (unsigned long)arg;
#endif
}

#endif /* _ASM_X86_SWITCH_TO_H */