vclock_gettime.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/unistd.h>
#include <asm/timex.h>
#include <asm/clocksource.h>
#include <asm/vvar.h>

#ifdef CONFIG_SPARC64
#define SYSCALL_STRING \
	"ta 0x6d;" \
	"bcs,a 1f;" \
	" sub %%g0, %%o0, %%o0;" \
	"1:"
#else
#define SYSCALL_STRING \
	"ta 0x10;" \
	"bcs,a 1f;" \
	" sub %%g0, %%o0, %%o0;" \
	"1:"
#endif

#define SYSCALL_CLOBBERS \
	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
	"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
	"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
	"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
	"f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \
	"f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \
	"cc", "memory"

/*
 * Compute the vvar page's address in the process address space, and return it
 * as a pointer to the vvar_data.
 */
notrace static __always_inline struct vvar_data *get_vvar_data(void)
{
	unsigned long ret;

	/*
	 * vdso data page is the first vDSO page so grab the PC
	 * and move up a page to get to the data page.
	 */
	__asm__("rd %%pc, %0" : "=r" (ret));
	ret &= ~(8192 - 1);
	ret -= 8192;

	return (struct vvar_data *) ret;
}
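
/*
 * The 8192-byte stride is the base SPARC page size: masking the PC
 * clears the offset within the vDSO code page, and the vvar data page
 * sits one page below it in the mapping.
 */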

notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts)
{
	register long num __asm__("g1") = __NR_clock_gettime;
	register long o0 __asm__("o0") = clock;
	register long o1 __asm__("o1") = (long) ts;

	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
	return o0;
}

notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	register long num __asm__("g1") = __NR_gettimeofday;
	register long o0 __asm__("o0") = (long) tv;
	register long o1 __asm__("o1") = (long) tz;

	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
	return o0;
}
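
/*
 * The fallbacks above trap into the kernel for the cases the fast path
 * cannot serve: an unknown clock ID, or a clocksource that is not
 * vDSO-capable (vclock_mode == VCLOCK_NONE).
 */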

#ifdef CONFIG_SPARC64
notrace static __always_inline u64 vread_tick(void)
{
	u64 ret;

	__asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
	return ret;
}

notrace static __always_inline u64 vread_tick_stick(void)
{
	u64 ret;

	__asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
	return ret;
}
#else
notrace static __always_inline u64 vread_tick(void)
{
	register unsigned long long ret asm("o4");

	__asm__ __volatile__("rd %%tick, %L0\n\t"
			     "srlx %L0, 32, %H0"
			     : "=r" (ret));
	return ret;
}

notrace static __always_inline u64 vread_tick_stick(void)
{
	register unsigned long long ret asm("o4");

	__asm__ __volatile__("rd %%asr24, %L0\n\t"
			     "srlx %L0, 32, %H0"
			     : "=r" (ret));
	return ret;
}
#endif
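
/*
 * %tick counts CPU clock cycles, while %asr24 is the %stick ("system
 * tick") register, which ticks at a rate common to all CPUs. In the
 * 32-bit (!CONFIG_SPARC64) compat build a 64-bit value is returned in
 * an even/odd register pair: the asm reads the full counter into the
 * low-word register (%L0) and shifts the upper 32 bits into the
 * high-word register (%H0).
 */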

notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
	u64 v;
	u64 cycles;

	cycles = vread_tick();
	v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
	return v * vvar->clock.mult;
}
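
/*
 * Cycles are converted to nanoseconds with the usual clocksource
 * arithmetic: ns = ((cycles - cycle_last) & mask) * mult >> shift.
 * The final shift is applied by the callers, after they add the
 * accumulated fractional nanoseconds (wall_time_snsec et al.).
 */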

notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
{
	u64 v;
	u64 cycles;

	cycles = vread_tick_stick();
	v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
	return v * vvar->clock.mult;
}
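
/*
 * vvar_read_begin()/vvar_read_retry() form the reader side of a
 * sequence counter: if the kernel updates the vvar data while it is
 * being read, the loop retries until it sees a consistent snapshot.
 */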

notrace static __always_inline int do_realtime(struct vvar_data *vvar,
					       struct __kernel_old_timespec *ts)
{
	unsigned long seq;
	u64 ns;

	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_sec;
		ns = vvar->wall_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
						     struct __kernel_old_timespec *ts)
{
	unsigned long seq;
	u64 ns;

	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_sec;
		ns = vvar->wall_time_snsec;
		ns += vgetsns_stick(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
						struct __kernel_old_timespec *ts)
{
	unsigned long seq;
	u64 ns;

	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_sec;
		ns = vvar->monotonic_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
						      struct __kernel_old_timespec *ts)
{
	unsigned long seq;
	u64 ns;

	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_sec;
		ns = vvar->monotonic_time_snsec;
		ns += vgetsns_stick(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

notrace static int do_realtime_coarse(struct vvar_data *vvar,
				      struct __kernel_old_timespec *ts)
{
	unsigned long seq;

	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_coarse_sec;
		ts->tv_nsec = vvar->wall_time_coarse_nsec;
	} while (unlikely(vvar_read_retry(vvar, seq)));
	return 0;
}

notrace static int do_monotonic_coarse(struct vvar_data *vvar,
				       struct __kernel_old_timespec *ts)
{
	unsigned long seq;

	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_coarse_sec;
		ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
	} while (unlikely(vvar_read_retry(vvar, seq)));
	return 0;
}
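
/*
 * The coarse variants return the timestamp recorded at the last timer
 * tick without reading the clocksource at all, which is why the
 * callers below do not check vclock_mode before using them.
 */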

notrace int
__vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
	struct vvar_data *vvd = get_vvar_data();

	switch (clock) {
	case CLOCK_REALTIME:
		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
			break;
		return do_realtime(vvd, ts);
	case CLOCK_MONOTONIC:
		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
			break;
		return do_monotonic(vvd, ts);
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(vvd, ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(vvd, ts);
	}
	/*
	 * Unknown clock ID? Fall back to the syscall.
	 */
	return vdso_fallback_gettime(clock, ts);
}

int
clock_gettime(clockid_t, struct __kernel_old_timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
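
/*
 * The weak alias exports the libc-style symbol name from the vDSO, so
 * a caller that binds to clock_gettime() directly reaches the same
 * code as one calling __vdso_clock_gettime().
 */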

notrace int
__vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts)
{
	struct vvar_data *vvd = get_vvar_data();

	switch (clock) {
	case CLOCK_REALTIME:
		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
			break;
		return do_realtime_stick(vvd, ts);
	case CLOCK_MONOTONIC:
		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
			break;
		return do_monotonic_stick(vvd, ts);
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(vvd, ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(vvd, ts);
	}
	/*
	 * Unknown clock ID? Fall back to the syscall.
	 */
	return vdso_fallback_gettime(clock, ts);
}
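
/*
 * The *_stick entry points mirror the %tick-based ones above;
 * presumably the kernel exposes whichever variant matches the
 * clocksource it selected at boot.
 */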

notrace int
__vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	struct vvar_data *vvd = get_vvar_data();

	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
		if (likely(tv != NULL)) {
			union tstv_t {
				struct __kernel_old_timespec ts;
				struct __kernel_old_timeval tv;
			} *tstv = (union tstv_t *) tv;
			do_realtime(vvd, &tstv->ts);
			/*
			 * Assign before dividing to ensure that the division is
			 * done in the type of tv_usec, not tv_nsec.
			 *
			 * There cannot be > 1 billion nsec in a second:
			 * do_realtime() has already distributed such overflow
			 * into tv_sec. So we can assign it to an int safely.
			 */
			tstv->tv.tv_usec = tstv->ts.tv_nsec;
			tstv->tv.tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it */
			tz->tz_minuteswest = vvd->tz_minuteswest;
			tz->tz_dsttime = vvd->tz_dsttime;
		}
		return 0;
	}
	return vdso_fallback_gettimeofday(tv, tz);
}

int
gettimeofday(struct __kernel_old_timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
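
/*
 * The union cast works because __kernel_old_timespec and
 * __kernel_old_timeval have the same layout (a seconds field followed
 * by a long subsecond field): do_realtime() fills the buffer as a
 * timespec, and the nanoseconds are then converted to microseconds in
 * place.
 */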

notrace int
__vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	struct vvar_data *vvd = get_vvar_data();

	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
		if (likely(tv != NULL)) {
			union tstv_t {
				struct __kernel_old_timespec ts;
				struct __kernel_old_timeval tv;
			} *tstv = (union tstv_t *) tv;
			do_realtime_stick(vvd, &tstv->ts);
			/*
			 * Assign before dividing to ensure that the division is
			 * done in the type of tv_usec, not tv_nsec.
			 *
			 * There cannot be > 1 billion nsec in a second:
			 * do_realtime_stick() has already distributed such
			 * overflow into tv_sec. So we can assign it to an int
			 * safely.
			 */
			tstv->tv.tv_usec = tstv->ts.tv_nsec;
			tstv->tv.tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it */
			tz->tz_minuteswest = vvd->tz_minuteswest;
			tz->tz_dsttime = vvd->tz_dsttime;
		}
		return 0;
	}
	return vdso_fallback_gettimeofday(tv, tz);
}
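
/*
 * Usage sketch (illustrative): userspace normally reaches these entry
 * points through the C library, which dispatches to the vDSO when one
 * is mapped. Assuming an ordinary glibc environment:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		// Served from the vDSO fast path when the clocksource
 *		// supports it; transparently falls back to the syscall
 *		// otherwise.
 *		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
 *			printf("%lld.%09ld\n", (long long)ts.tv_sec,
 *			       ts.tv_nsec);
 *		return 0;
 *	}
 */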