  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Tick related global functions
  4. */
  5. #ifndef _LINUX_TICK_H
  6. #define _LINUX_TICK_H
  7. #include <linux/clockchips.h>
  8. #include <linux/irqflags.h>
  9. #include <linux/percpu.h>
  10. #include <linux/context_tracking_state.h>
  11. #include <linux/cpumask.h>
  12. #include <linux/sched.h>
  13. #include <linux/rcupdate.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
extern void tick_handover_do_timer(void);
extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
/* No-op stubs: without generic clockevents there is no tick device to manage. */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
/* Suspend/resume the tick around system suspend; no-ops otherwise. */
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif
#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
# ifndef arch_needs_cpu
/* Default: no architecture-specific reason to keep this CPU's tick alive. */
# define arch_needs_cpu() (0)
# endif
# else
static inline void tick_irq_enter(void) { }
#endif
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
/* Take broadcast tick duty away from a CPU that went offline. */
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif
/* Modes accepted by tick_broadcast_control(). */
enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};
/* Idle enter/exit states passed to tick_broadcast_oneshot_control(). */
enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Switch the broadcast mode of the current CPU (see enum tick_broadcast_mode). */
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
extern void tick_offline_cpu(unsigned int cpu);
#else
static inline void tick_offline_cpu(unsigned int cpu) { }
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
/* Without clockevents there is no broadcast device; report success. */
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif
/**
 * tick_broadcast_enable - Enable broadcast mode on the current CPU
 */
static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
/**
 * tick_broadcast_disable - Disable broadcast mode on the current CPU
 */
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
/**
 * tick_broadcast_force - Force broadcast mode on the current CPU
 */
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
/**
 * tick_broadcast_enter - Enter the broadcast state before deep idle
 *
 * Propagates the return value of tick_broadcast_oneshot_control().
 */
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
/**
 * tick_broadcast_exit - Leave the broadcast state after deep idle
 */
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
/*
 * Tick dependency bits: a set bit is a reason the tick must keep running
 * (consumed by the tick_dep_set*()/tick_dep_clear*() wrappers below).
 */
enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER = 0,
	TICK_DEP_BIT_PERF_EVENTS = 1,
	TICK_DEP_BIT_SCHED = 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
	TICK_DEP_BIT_RCU = 4,
	TICK_DEP_BIT_RCU_EXP = 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP
/* Mask forms of the dependency bits above. */
#define TICK_DEP_MASK_NONE 0
#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP (1 << TICK_DEP_BIT_RCU_EXP)
  112. #ifdef CONFIG_NO_HZ_COMMON
  113. extern bool tick_nohz_enabled;
  114. extern bool tick_nohz_tick_stopped(void);
  115. extern bool tick_nohz_tick_stopped_cpu(int cpu);
  116. extern void tick_nohz_idle_stop_tick(void);
  117. extern void tick_nohz_idle_retain_tick(void);
  118. extern void tick_nohz_idle_restart_tick(void);
  119. extern void tick_nohz_idle_enter(void);
  120. extern void tick_nohz_idle_exit(void);
  121. extern void tick_nohz_irq_exit(void);
  122. extern bool tick_nohz_idle_got_tick(void);
  123. extern ktime_t tick_nohz_get_next_hrtimer(void);
  124. extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
  125. extern unsigned long tick_nohz_get_idle_calls(void);
  126. extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
  127. extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
  128. extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
  129. static inline void tick_nohz_idle_stop_tick_protected(void)
  130. {
  131. local_irq_disable();
  132. tick_nohz_idle_stop_tick();
  133. local_irq_enable();
  134. }
  135. #else /* !CONFIG_NO_HZ_COMMON */
  136. #define tick_nohz_enabled (0)
  137. static inline int tick_nohz_tick_stopped(void) { return 0; }
  138. static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
  139. static inline void tick_nohz_idle_stop_tick(void) { }
  140. static inline void tick_nohz_idle_retain_tick(void) { }
  141. static inline void tick_nohz_idle_restart_tick(void) { }
  142. static inline void tick_nohz_idle_enter(void) { }
  143. static inline void tick_nohz_idle_exit(void) { }
  144. static inline bool tick_nohz_idle_got_tick(void) { return false; }
  145. static inline ktime_t tick_nohz_get_next_hrtimer(void)
  146. {
  147. /* Next wake up is the tick period, assume it starts now */
  148. return ktime_add(ktime_get(), TICK_NSEC);
  149. }
  150. static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
  151. {
  152. *delta_next = TICK_NSEC;
  153. return *delta_next;
  154. }
  155. static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
  156. static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
  157. static inline void tick_nohz_idle_stop_tick_protected(void) { }
  158. #endif /* !CONFIG_NO_HZ_COMMON */
  159. #ifdef CONFIG_NO_HZ_FULL
  160. extern bool tick_nohz_full_running;
  161. extern cpumask_var_t tick_nohz_full_mask;
  162. static inline bool tick_nohz_full_enabled(void)
  163. {
  164. if (!context_tracking_enabled())
  165. return false;
  166. return tick_nohz_full_running;
  167. }
/*
 * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
 * the cpu expression (typically smp_processor_id()) _after_ the static
 * key, so the common disabled case never evaluates it.
 */
#define tick_nohz_full_cpu(_cpu) ({ \
	bool __ret = false; \
	if (tick_nohz_full_enabled()) \
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask); \
	__ret; \
})
  179. static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
  180. {
  181. if (tick_nohz_full_enabled())
  182. cpumask_or(mask, mask, tick_nohz_full_mask);
  183. }
  184. extern void tick_nohz_dep_set(enum tick_dep_bits bit);
  185. extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
  186. extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
  187. extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
  188. extern void tick_nohz_dep_set_task(struct task_struct *tsk,
  189. enum tick_dep_bits bit);
  190. extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
  191. enum tick_dep_bits bit);
  192. extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
  193. enum tick_dep_bits bit);
  194. extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
  195. enum tick_dep_bits bit);
  196. extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
  197. /*
  198. * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
  199. * on top of static keys.
  200. */
  201. static inline void tick_dep_set(enum tick_dep_bits bit)
  202. {
  203. if (tick_nohz_full_enabled())
  204. tick_nohz_dep_set(bit);
  205. }
  206. static inline void tick_dep_clear(enum tick_dep_bits bit)
  207. {
  208. if (tick_nohz_full_enabled())
  209. tick_nohz_dep_clear(bit);
  210. }
  211. static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
  212. {
  213. if (tick_nohz_full_cpu(cpu))
  214. tick_nohz_dep_set_cpu(cpu, bit);
  215. }
  216. static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
  217. {
  218. if (tick_nohz_full_cpu(cpu))
  219. tick_nohz_dep_clear_cpu(cpu, bit);
  220. }
  221. static inline void tick_dep_set_task(struct task_struct *tsk,
  222. enum tick_dep_bits bit)
  223. {
  224. if (tick_nohz_full_enabled())
  225. tick_nohz_dep_set_task(tsk, bit);
  226. }
  227. static inline void tick_dep_clear_task(struct task_struct *tsk,
  228. enum tick_dep_bits bit)
  229. {
  230. if (tick_nohz_full_enabled())
  231. tick_nohz_dep_clear_task(tsk, bit);
  232. }
  233. static inline void tick_dep_set_signal(struct task_struct *tsk,
  234. enum tick_dep_bits bit)
  235. {
  236. if (tick_nohz_full_enabled())
  237. tick_nohz_dep_set_signal(tsk, bit);
  238. }
  239. static inline void tick_dep_clear_signal(struct signal_struct *signal,
  240. enum tick_dep_bits bit)
  241. {
  242. if (tick_nohz_full_enabled())
  243. tick_nohz_dep_clear_signal(signal, bit);
  244. }
  245. extern void tick_nohz_full_kick_cpu(int cpu);
  246. extern void __tick_nohz_task_switch(void);
  247. extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
  248. #else
/* CONFIG_NO_HZ_FULL=n: all nohz_full operations collapse to no-ops. */
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
  270. #endif
  271. static inline void tick_nohz_task_switch(void)
  272. {
  273. if (tick_nohz_full_enabled())
  274. __tick_nohz_task_switch();
  275. }
  276. static inline void tick_nohz_user_enter_prepare(void)
  277. {
  278. if (tick_nohz_full_cpu(smp_processor_id()))
  279. rcu_nocb_flush_deferred_wakeup();
  280. }
  281. #endif