gettimeofday.h

/*
 * Copyright (C) 2018 ARM Limited
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/vdso/vdso.h>
#include <asm/clocksource.h>
#include <asm/unistd.h>
#include <asm/vdso.h>

#define VDSO_HAS_CLOCK_GETRES		1

#if MIPS_ISA_REV < 6
#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
#else
#define VDSO_SYSCALL_CLOBBERS
#endif
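
/*
 * Each *_fallback() helper below issues the real syscall using the MIPS
 * convention: arguments in a0/a1, syscall number in v0, result in v0 and
 * an error flag in a3.  On error the result is negated so callers see the
 * usual negative-errno value.  Pre-R6 ISAs additionally list "hi"/"lo" as
 * clobbers (via VDSO_SYSCALL_CLOBBERS above), since those registers are
 * not preserved across the syscall; MIPS R6 removed HI/LO entirely, so
 * naming them there would not even assemble.  VDSO_HAS_CLOCK_GETRES tells
 * the generic vDSO library that a clock_getres() fast path is provided.
 * A caller in the generic code would use these roughly as in this
 * hypothetical sketch:
 *
 *	if (gettimeofday_fallback(tv, tz) < 0)
 *		;	// syscall failed, e.g. -EFAULT in the return value
 */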

static __always_inline long gettimeofday_fallback(
				struct __kernel_old_timeval *_tv,
				struct timezone *_tz)
{
	register struct timezone *tz asm("a1") = _tz;
	register struct __kernel_old_timeval *tv asm("a0") = _tv;
	register long ret asm("v0");
	register long nr asm("v0") = __NR_gettimeofday;
	register long error asm("a3");

	asm volatile(
	"       syscall\n"
	: "=r" (ret), "=r" (error)
	: "r" (tv), "r" (tz), "r" (nr)
	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
	  "$14", "$15", "$24", "$25",
	  VDSO_SYSCALL_CLOBBERS
	  "memory");

	return error ? -ret : ret;
}
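
/*
 * For 64-bit time on the 32-bit ABIs (o32/n32) the plain __NR_clock_gettime
 * and __NR_clock_getres syscalls take a 32-bit timespec, so the
 * __kernel_timespec fallbacks below must use the *_time64 syscall numbers
 * there; n64 can use the original numbers directly.
 */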

static __always_inline long clock_gettime_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("a1") = _ts;
	register clockid_t clkid asm("a0") = _clkid;
	register long ret asm("v0");
#if _MIPS_SIM == _MIPS_SIM_ABI64
	register long nr asm("v0") = __NR_clock_gettime;
#else
	register long nr asm("v0") = __NR_clock_gettime64;
#endif
	register long error asm("a3");

	asm volatile(
	"       syscall\n"
	: "=r" (ret), "=r" (error)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
	  "$14", "$15", "$24", "$25",
	  VDSO_SYSCALL_CLOBBERS
	  "memory");

	return error ? -ret : ret;
}

static __always_inline int clock_getres_fallback(
					clockid_t _clkid,
					struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("a1") = _ts;
	register clockid_t clkid asm("a0") = _clkid;
	register long ret asm("v0");
#if _MIPS_SIM == _MIPS_SIM_ABI64
	register long nr asm("v0") = __NR_clock_getres;
#else
	register long nr asm("v0") = __NR_clock_getres_time64;
#endif
	register long error asm("a3");

	asm volatile(
	"       syscall\n"
	: "=r" (ret), "=r" (error)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
	  "$14", "$15", "$24", "$25",
	  VDSO_SYSCALL_CLOBBERS
	  "memory");

	return error ? -ret : ret;
}
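
/*
 * On the 32-bit ABIs the generic vDSO code additionally needs fallbacks
 * taking the legacy 32-bit timespec layout; these use the original
 * (time32) syscall numbers.
 */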
#if _MIPS_SIM != _MIPS_SIM_ABI64

static __always_inline long clock_gettime32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("a1") = _ts;
	register clockid_t clkid asm("a0") = _clkid;
	register long ret asm("v0");
	register long nr asm("v0") = __NR_clock_gettime;
	register long error asm("a3");

	asm volatile(
	"       syscall\n"
	: "=r" (ret), "=r" (error)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
	  "$14", "$15", "$24", "$25",
	  VDSO_SYSCALL_CLOBBERS
	  "memory");

	return error ? -ret : ret;
}

static __always_inline int clock_getres32_fallback(
					clockid_t _clkid,
					struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("a1") = _ts;
	register clockid_t clkid asm("a0") = _clkid;
	register long ret asm("v0");
	register long nr asm("v0") = __NR_clock_getres;
	register long error asm("a3");

	asm volatile(
	"       syscall\n"
	: "=r" (ret), "=r" (error)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
	  "$14", "$15", "$24", "$25",
	  VDSO_SYSCALL_CLOBBERS
	  "memory");

	return error ? -ret : ret;
}

#endif
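
/*
 * RDHWR $2 reads hardware register 2, the CP0 Count cycle counter.  The
 * kernel grants (or, on cores without usable RDHWR, emulates) user-mode
 * access to it, which is what makes the R4K clocksource readable from the
 * vDSO without a syscall.
 */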
#ifdef CONFIG_CSRC_R4K

static __always_inline u64 read_r4k_count(void)
{
	unsigned int count;

	__asm__ __volatile__(
	"	.set push\n"
	"	.set mips32r2\n"
	"	rdhwr	%0, $2\n"
	"	.set pop\n"
	: "=r" (count));

	return count;
}

#endif
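
/*
 * The GIC shared counter is 64 bits wide but is mapped into the vDSO as
 * two 32-bit words.  Re-reading the high word until it is stable guards
 * against the low word wrapping between the two reads.
 */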
#ifdef CONFIG_CLKSRC_MIPS_GIC

static __always_inline u64 read_gic_count(const struct vdso_data *data)
{
	void __iomem *gic = get_gic(data);
	u32 hi, hi2, lo;

	do {
		hi = __raw_readl(gic + sizeof(lo));
		lo = __raw_readl(gic);
		hi2 = __raw_readl(gic + sizeof(lo));
	} while (hi2 != hi);

	return (((u64)hi) << 32) + lo;
}

#endif
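
/*
 * clock_mode mirrors the vdso_clock_mode selected by the active
 * clocksource driver; the generic code has already checked that it names
 * a usable counter before calling in here.
 */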
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
#ifdef CONFIG_CSRC_R4K
	if (clock_mode == VDSO_CLOCKMODE_R4K)
		return read_r4k_count();
#endif
#ifdef CONFIG_CLKSRC_MIPS_GIC
	if (clock_mode == VDSO_CLOCKMODE_GIC)
		return read_gic_count(vd);
#endif
	/*
	 * The core has already checked the mode, so reaching this point
	 * means we raced against a concurrent update.  Return something;
	 * the core will do another round, see the change and fall back
	 * to the syscall.
	 */
	return 0;
}
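
/*
 * Without at least one of the counters above the vDSO cannot service
 * high-resolution clocks, and the generic code takes the syscall path
 * instead.
 */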
static inline bool mips_vdso_hres_capable(void)
{
	return IS_ENABLED(CONFIG_CSRC_R4K) ||
	       IS_ENABLED(CONFIG_CLKSRC_MIPS_GIC);
}
#define __arch_vdso_hres_capable mips_vdso_hres_capable

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return get_vdso_data();
}

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */