/* include/linux/vtime.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#include <linux/sched.h>

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif

/*
 * Common vtime APIs
 *
 * Provided by the accounting backend (native or generic) whenever any
 * CONFIG_VIRT_CPU_ACCOUNTING flavour is enabled.
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* Account elapsed time to @tsk as kernel (system) time. */
extern void vtime_account_kernel(struct task_struct *tsk);
/* Account elapsed time to @tsk as idle time. */
extern void vtime_account_idle(struct task_struct *tsk);
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/*
 * Generic (context-tracking based) vtime: hooks fired on user/guest
 * context transitions, plus per-CPU idle-task initialization.
 */
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */

/* No-op stubs so callers need no #ifdefs when generic vtime is compiled out. */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Native vtime: irq entry/exit accounting hooks and end-of-tick flush,
 * implemented by the architecture.
 */
extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
extern void vtime_account_softirq(struct task_struct *tsk);
extern void vtime_account_hardirq(struct task_struct *tsk);
extern void vtime_flush(struct task_struct *tsk);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/* No-op stubs when native vtime accounting is compiled out. */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline void vtime_account_softirq(struct task_struct *tsk) { }
static inline void vtime_account_hardirq(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif
/*
 * vtime_accounting_enabled_this_cpu() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)

/* Native accounting is unconditionally active on every CPU. */
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);

/*
 * Entering guest mode: first account the time elapsed so far as kernel
 * time, then set PF_VCPU on current so time from here on is attributed
 * to the guest. The order of the two statements is the contract.
 */
static __always_inline void vtime_account_guest_enter(void)
{
	vtime_account_kernel(current);
	current->flags |= PF_VCPU;
}

/*
 * Leaving guest mode: account the elapsed slice (still flagged PF_VCPU,
 * i.e. as guest time), then clear the flag.
 */
static __always_inline void vtime_account_guest_exit(void)
{
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
}
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)

/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}

/* Same check, for a given CPU. */
static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}

/* Same check, for the CPU we are currently running on. */
static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}

extern void vtime_task_switch_generic(struct task_struct *prev);

/* Context-switch hook: only pay for the generic path when vtime is on here. */
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_task_switch_generic(prev);
}

/*
 * Entering guest mode: take the full generic vtime path when accounting
 * is enabled on this CPU; otherwise just flag current as running a vCPU
 * (PF_VCPU).
 */
static __always_inline void vtime_account_guest_enter(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}

/* Leaving guest mode: mirror of vtime_account_guest_enter(). */
static __always_inline void vtime_account_guest_exit(void)
{
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

/* No vtime accounting at all: everything reduces to the PF_VCPU flag. */
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }

static __always_inline void vtime_account_guest_enter(void)
{
	current->flags |= PF_VCPU;
}

static __always_inline void vtime_account_guest_exit(void)
{
	current->flags &= ~PF_VCPU;
}
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/* Tick-based irq time accounting hook; no-op when compiled out. */
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif
/*
 * Irq entry/exit accounting entry points: each forwards to both the
 * vtime hook and the irqtime hook, either of which may be a no-op stub
 * depending on configuration.
 */

/* Softirq entry: preempt count gains SOFTIRQ_OFFSET. */
static inline void account_softirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, SOFTIRQ_OFFSET);
	irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
}

/* Softirq exit: offset 0 — the softirq level is being left. */
static inline void account_softirq_exit(struct task_struct *tsk)
{
	vtime_account_softirq(tsk);
	irqtime_account_irq(tsk, 0);
}

/* Hardirq entry: preempt count gains HARDIRQ_OFFSET. */
static inline void account_hardirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, HARDIRQ_OFFSET);
	irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}

/* Hardirq exit: offset 0 — the hardirq level is being left. */
static inline void account_hardirq_exit(struct task_struct *tsk)
{
	vtime_account_hardirq(tsk);
	irqtime_account_irq(tsk, 0);
}

#endif /* _LINUX_KERNEL_VTIME_H */