vvar.h

/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#ifndef _ASM_SPARC_VVAR_DATA_H
#define _ASM_SPARC_VVAR_DATA_H

#include <asm/clocksource.h>
#include <asm/processor.h>
#include <asm/barrier.h>
#include <linux/time.h>
#include <linux/types.h>

struct vvar_data {
        unsigned int seq;

        int vclock_mode;
        struct { /* extract of a clocksource struct */
                u64     cycle_last;
                u64     mask;
                int     mult;
                int     shift;
        } clock;
        /* open coded 'struct timespec' */
        u64             wall_time_sec;
        u64             wall_time_snsec;
        u64             monotonic_time_snsec;
        u64             monotonic_time_sec;
        u64             monotonic_time_coarse_sec;
        u64             monotonic_time_coarse_nsec;
        u64             wall_time_coarse_sec;
        u64             wall_time_coarse_nsec;

        int             tz_minuteswest;
        int             tz_dsttime;
};

extern struct vvar_data *vvar_data;
extern int vdso_fix_stick;

static inline unsigned int vvar_read_begin(const struct vvar_data *s)
{
        unsigned int ret;

repeat:
        ret = READ_ONCE(s->seq);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb(); /* Finish all reads before we return seq */
        return ret;
}

static inline int vvar_read_retry(const struct vvar_data *s,
                                  unsigned int start)
{
        smp_rmb(); /* Finish all reads before checking the value of seq */
        return unlikely(s->seq != start);
}

static inline void vvar_write_begin(struct vvar_data *s)
{
        ++s->seq;
        smp_wmb(); /* Makes sure that increment of seq is reflected */
}

static inline void vvar_write_end(struct vvar_data *s)
{
        smp_wmb(); /* Makes the value of seq current before we increment */
        ++s->seq;
}

#endif /* _ASM_SPARC_VVAR_DATA_H */
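
For reference, a minimal sketch of how this header's seqcount-style protocol is meant to be used: a reader calls vvar_read_begin(), copies the fields it needs, and retries if vvar_read_retry() reports that a writer updated the data concurrently; the writer brackets every update with vvar_write_begin()/vvar_write_end(). The function names sample_wall_time() and publish_wall_time() below, the choice of fields copied, and the assumption that the header is reachable as <asm/vvar.h> are illustrative, not part of this file.

/* Illustrative only: reader-side loop over the vvar seqcount. */
#include <asm/vvar.h>

static void sample_wall_time(const struct vvar_data *vvar, u64 *sec, u64 *snsec)
{
        unsigned int seq;

        do {
                seq = vvar_read_begin(vvar);    /* spin until seq is even */
                *sec = vvar->wall_time_sec;     /* copy a consistent pair */
                *snsec = vvar->wall_time_snsec;
        } while (vvar_read_retry(vvar, seq));   /* writer raced us: retry */
}

/* Illustrative only: writer-side update (kernel side of the protocol). */
static void publish_wall_time(struct vvar_data *vvar, u64 sec, u64 snsec)
{
        vvar_write_begin(vvar);         /* seq becomes odd: readers wait */
        vvar->wall_time_sec = sec;
        vvar->wall_time_snsec = snsec;
        vvar_write_end(vvar);           /* seq becomes even: data visible */
}

With this pattern a reader either observes the snapshot from before vvar_write_begin() or the one completed by vvar_write_end(), never a torn mix of the two.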