arch_timer.h — arch/arm64/include/asm/arch_timer.h (5.0 KB)
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * arch/arm64/include/asm/arch_timer.h
  4. *
  5. * Copyright (C) 2012 ARM Ltd.
  6. * Author: Marc Zyngier <[email protected]>
  7. */
  8. #ifndef __ASM_ARCH_TIMER_H
  9. #define __ASM_ARCH_TIMER_H
  10. #include <asm/barrier.h>
  11. #include <asm/hwcap.h>
  12. #include <asm/sysreg.h>
  13. #include <linux/bug.h>
  14. #include <linux/init.h>
  15. #include <linux/jump_label.h>
  16. #include <linux/smp.h>
  17. #include <linux/types.h>
  18. #include <clocksource/arm_arch_timer.h>
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
/*
 * has_erratum_handler(h) - true if the current CPU has an out-of-line
 * erratum workaround installed that provides handler @h (a function-pointer
 * member of struct arch_timer_erratum_workaround, e.g. read_cntvct_el0).
 */
#define has_erratum_handler(h)						\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h);					\
	})

/*
 * erratum_handler(h) - pick the function to use for operation @h:
 * the per-CPU workaround's handler if one is installed (with an isb()
 * issued first, before the handler is returned), otherwise the default
 * arch_timer_##h accessor. Callers must pin the CPU (the per-CPU read
 * and the eventual call must not migrate; see arch_timer_reg_read_stable()).
 */
#define erratum_handler(h)						\
	({								\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		(__wa && __wa->h) ? ({ isb(); __wa->h;}) : arch_timer_##h; \
	})

#else
/* No out-of-line workarounds configured: always use the default accessors. */
#define has_erratum_handler(h)			   false
#define erratum_handler(h)			   (arch_timer_##h)
#endif
/*
 * How a workaround entry's @id field is matched against the running system.
 */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* match against device-tree data */
	ate_match_local_cap_id,		/* match against a local CPU capability id */
	ate_match_acpi_oem_info,	/* match against ACPI OEM table info */
};
struct clock_event_device;

/*
 * Description of one timer erratum workaround: how to identify affected
 * systems (@match_type / @id) and the replacement accessors to use in
 * place of the defaults. A NULL handler pointer means the default
 * accessor is used for that operation (see erratum_handler()).
 */
struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;
	const void *id;		/* match data; interpretation depends on match_type */
	const char *desc;	/* human-readable description of the erratum */
	u64 (*read_cntpct_el0)(void);
	u64 (*read_cntvct_el0)(void);
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
	bool disable_compat_vdso;	/* if set, the compat vDSO must not read the counter */
};
/*
 * Per-CPU pointer to the active erratum workaround; the erratum_handler()
 * macros treat a NULL pointer as "no workaround on this CPU".
 */
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
		timer_unstable_counter_workaround);
/*
 * Default (non-erratum) read of the physical counter.
 *
 * Without FEAT_ECV an isb() precedes the CNTPCT_EL0 read so the counter
 * cannot be sampled speculatively ahead of program order. When the
 * ARM64_HAS_ECV capability is set, the alternative patches in a read of
 * the self-synchronising CNTPCTSS_EL0 register and replaces the isb
 * with a nop.
 */
static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));

	return cnt;
}
/*
 * Default (non-erratum) read of the virtual counter.
 *
 * Same barrier strategy as arch_timer_read_cntpct_el0(): isb + CNTVCT_EL0
 * by default, patched to a self-synchronising CNTVCTSS_EL0 read (no
 * barrier needed) when ARM64_HAS_ECV is present.
 */
static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));

	return cnt;
}
/*
 * arch_timer_reg_read_stable(reg) - read counter @reg via the
 * erratum-aware accessor. Preemption is disabled (notrace variants, as
 * this sits on the timekeeping fast path) so that the per-CPU workaround
 * lookup and the register read happen on the same CPU.
 */
#define arch_timer_reg_read_stable(reg)					\
	({								\
		u64 _val;						\
									\
		preempt_disable_notrace();				\
		_val = erratum_handler(read_ ## reg)();			\
		preempt_enable_notrace();				\
									\
		_val;							\
	})
  82. /*
  83. * These register accessors are marked inline so the compiler can
  84. * nicely work out which register we want, and chuck away the rest of
  85. * the code.
  86. */
/*
 * Write @val to a timer register via the CP15/sysreg interface.
 *
 * @access selects the physical (CNTP_*) or virtual (CNTV_*) EL0 timer;
 * @reg selects the control or compare-value register. Control writes are
 * followed by an isb() so the new enable/mask state takes effect before
 * returning; CVAL writes are not. Both @access and @reg must be
 * compile-time constants: any unsupported combination is rejected at
 * build time by BUILD_BUG() after constant folding.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u64 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntp_ctl_el0);
			isb();
			break;
		case ARCH_TIMER_REG_CVAL:
			write_sysreg(val, cntp_cval_el0);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntv_ctl_el0);
			isb();
			break;
		case ARCH_TIMER_REG_CVAL:
			write_sysreg(val, cntv_cval_el0);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		BUILD_BUG();
	}
}
/*
 * Read a timer register via the CP15/sysreg interface.
 *
 * Only the control register of each timer (physical or virtual, per
 * @access) is readable through this helper; counters and compare values
 * have their own accessors. As with the write side, @access and @reg
 * must be compile-time constants — unsupported combinations fail the
 * build via BUILD_BUG().
 */
static __always_inline
u64 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntp_ctl_el0);
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntv_ctl_el0);
		default:
			BUILD_BUG();
		}
	}

	/* Non-constant or invalid @access: reject at build time. */
	BUILD_BUG();
	unreachable();
}
/* Return the counter frequency as reported by CNTFRQ_EL0. */
static inline u32 arch_timer_get_cntfrq(void)
{
	return read_sysreg(cntfrq_el0);
}
/* Return the current value of the kernel timer control register CNTKCTL_EL1. */
static inline u32 arch_timer_get_cntkctl(void)
{
	return read_sysreg(cntkctl_el1);
}
/*
 * Write CNTKCTL_EL1 (controls EL0 access to the timers/counters and the
 * event stream); the isb() ensures the new setting is in effect before
 * returning.
 */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
	isb();
}
/*
 * Read the physical counter through the erratum-aware path, then enforce
 * that no subsequent dependent loads can be speculated past the read.
 */
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
	u64 cnt;

	cnt = arch_timer_reg_read_stable(cntpct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
/*
 * Read the physical counter directly, bypassing any erratum workaround:
 * isb + CNTPCT_EL0 by default, patched to the self-synchronising
 * CNTPCTSS_EL0 when ARM64_HAS_ECV is present. Ordering against later
 * accesses is then enforced on the result.
 */
static __always_inline u64 __arch_counter_get_cntpct(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntpct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTPCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
/*
 * Read the virtual counter through the erratum-aware path, then enforce
 * ordering against subsequent accesses on the result.
 */
static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
	u64 cnt;

	cnt = arch_timer_reg_read_stable(cntvct_el0);
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
/*
 * Read the virtual counter directly, bypassing any erratum workaround:
 * isb + CNTVCT_EL0 by default, patched to the self-synchronising
 * CNTVCTSS_EL0 when ARM64_HAS_ECV is present. Ordering against later
 * accesses is then enforced on the result.
 */
static __always_inline u64 __arch_counter_get_cntvct(void)
{
	u64 cnt;

	asm volatile(ALTERNATIVE("isb\n mrs %0, cntvct_el0",
				 "nop\n" __mrs_s("%0", SYS_CNTVCTSS_EL0),
				 ARM64_HAS_ECV)
		     : "=r" (cnt));
	arch_counter_enforce_ordering(cnt);
	return cnt;
}
/* No arch-specific timer initialisation is needed on arm64; always succeeds. */
static inline int arch_timer_arch_init(void)
{
	return 0;
}
/*
 * Advertise availability of the timer event stream: set the native
 * EVTSTRM CPU feature and, when compat (32-bit) tasks are supported,
 * the corresponding compat ELF hwcap as well.
 */
static inline void arch_timer_set_evtstrm_feature(void)
{
	cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
/* True if the EVTSTRM CPU feature has been advertised on this system. */
static inline bool arch_timer_have_evtstrm_feature(void)
{
	return cpu_have_named_feature(EVTSTRM);
}
  201. #endif