cputime.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct)	\
	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern bool task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline bool task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
	return false;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
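
/*
 * Usage sketch (illustrative only, not part of the upstream header):
 * sampling a task's accumulated user and system time, both in nanoseconds.
 * The return value reports whether an in-flight vtime delta was folded into
 * the snapshot; with the fallback above it is always false.
 *
 *	u64 utime, stime;
 *
 *	task_cputime(current, &utime, &stime);
 *	pr_info("utime=%llu stime=%llu\n", utime, stime);
 */
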
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
			   u64 *ut, u64 *st);
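
/*
 * Usage sketch (illustrative only): the *_adjusted() variants run the raw
 * utime/stime split through cputime_adjust(), which scales it against the
 * task's scheduler runtime and uses @prev to keep the reported values
 * monotonic between samples.
 *
 *	u64 ut, st;
 *
 *	task_cputime_adjusted(current, &ut, &st);
 */
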
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
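
/*
 * Usage sketch (illustrative only): struct task_cputime collects utime,
 * stime and sum_exec_runtime for every thread in the group, including
 * threads that have already exited.
 *
 *	struct task_cputime times;
 *
 *	thread_group_cputime(tsk, &times);
 *	total_ns = times.utime + times.stime;
 */
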
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
 *
 * @tsk: Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/*
	 * Check whether posix CPU timers are active. If not, thread group
	 * accounting is not active either. Lockless check.
	 */
	if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
		return NULL;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account any further cputime consumed by
	 * that task to the signal struct, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any running thread group timer.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return NULL;

	return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
	return NULL;
}
#endif

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 *	     thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 *	     thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     u64 cputime)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns: Time value by which to increment the sum_exec_runtime field
 *	of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

	if (!cputimer)
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
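
/*
 * Usage sketch (illustrative only): the account_group_*() helpers above are
 * called from the per-task accounting path so that each slice of time lands
 * both on the task and, when POSIX CPU timers are armed, on the shared
 * thread-group totals. Roughly:
 *
 *	void account_user_time(struct task_struct *p, u64 cputime)
 *	{
 *		p->utime += cputime;
 *		account_group_user_time(p, cputime);
 *		...
 *	}
 */
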
static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
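
/*
 * Usage sketch (illustrative only): prev_cputime_init() prepares the
 * monotonicity state consumed by cputime_adjust() above; it is expected to
 * run once when the owning structure is set up, e.g. at fork time:
 *
 *	prev_cputime_init(&p->prev_cputime);
 */
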
extern unsigned long long
task_sched_runtime(struct task_struct *task);

#endif /* _LINUX_SCHED_CPUTIME_H */