/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>
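
/*
 * Idleness of the CPU a load-balancing pass runs on behalf of: already
 * idle, busy, or about to become idle (CPU_NEWLY_IDLE).  The sentinel
 * CPU_MAX_IDLE_TYPES sizes the per-type scheduler statistics arrays.
 */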
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};
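
/*
 * wake_up_if_idle() kicks @cpu out of its idle loop if, and only if,
 * it is currently idle.  On !CONFIG_SMP builds there is no remote CPU
 * to kick, hence the empty stub.
 */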
#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}
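
/*
 * The pairing with resched_curr() is the classic store-buffering
 * pattern: this side does STORE(POLLING); LOAD(NEED_RESCHED), while
 * resched_curr() does STORE(NEED_RESCHED); LOAD(POLLING) to decide
 * whether the IPI can be skipped.  With full barriers on both sides,
 * at least one side is guaranteed to observe the other's store, so a
 * reschedule request cannot be lost.
 */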

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
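/*
 * Without TIF_POLLING_NRFLAG the architecture never polls need_resched
 * from the idle loop; remote CPUs always use an IPI to reschedule us.
 * There is no polling state to publish, so the helpers reduce to the
 * bare need_resched test.
 */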
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
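
/*
 * Illustrative sketch only, not part of this header's API: roughly how
 * an idle loop is expected to use the helpers above, modeled on the
 * polling idle pattern in kernel/sched/idle.c.  The function name is
 * hypothetical, and cpu_relax() is assumed to be available from the
 * architecture headers.
 */
static inline void example_poll_idle(void)
{
	/*
	 * Publish the polling state; if NEED_RESCHED was already set,
	 * the barrier in current_set_polling_and_test() guarantees we
	 * see it and skip the spin entirely.
	 */
	if (!current_set_polling_and_test()) {
		/* Spin until a remote CPU sets TIF_NEED_RESCHED. */
		while (!tif_need_resched())
			cpu_relax();
	}

	/*
	 * Withdraw the polling state and fold any pending resched;
	 * from here on, waking this CPU requires an IPI again.
	 */
	current_clr_polling();
}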

#endif /* _LINUX_SCHED_IDLE_H */