/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points :
 *
 * -  Use a seqcount on 32-bit
 * -  The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt the
 *    writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a reader,
 *    can be invoked from an IRQ context. On 64-bit systems this variant does
 *    not disable interrupts.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    values are consistent w.r.t. each other (remember the second key point
 *    above: seqcounts are not used on 64-bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used in a non-preemptible context):
 *
 *	spin_lock_bh(...) or other synchronization to get exclusive access
 *	...
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_add(&stats->bytes64, len);	// non atomic operation
 *	u64_stats_inc(&stats->packets64);	// non atomic operation
 *	u64_stats_update_end(&stats->syncp);
 *
 * While a consumer (reader) should use the following template to get a
 * consistent snapshot of each variable (but no guarantee across several of
 * them):
 *
 *	u64 tbytes, tpackets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		tbytes = u64_stats_read(&stats->bytes64);	// non atomic operation
 *		tpackets = u64_stats_read(&stats->packets64);	// non atomic operation
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * An example of use can be found in drivers/net/loopback.c, which uses per_cpu
 * containers in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32
	seqcount_t	seq;
#endif
};
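
/*
 * Illustrative sketch only (the struct and field names below are hypothetical,
 * not part of this API): a typical per-cpu statistics container pairs its
 * 64-bit counters with one u64_stats_sync instance that guards them on 32-bit:
 *
 *	struct foo_pcpu_stats {
 *		u64_stats_t		rx_packets;
 *		u64_stats_t		rx_bytes;
 *		struct u64_stats_sync	syncp;
 *	};
 *
 *	struct foo_pcpu_stats __percpu *foo_stats;
 */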

#if BITS_PER_LONG == 64
#include <asm/local64.h>

typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	local64_set(&p->v, val);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
static inline unsigned long __u64_stats_irqsave(void) { return 0; }
static inline void __u64_stats_irqrestore(unsigned long flags) { }

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return 0;
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
	return false;
}

#else /* 64 bit */

typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	p->v = val;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
	seqcount_init(&syncp->seq);
}

static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
	preempt_disable_nested();
	write_seqcount_begin(&syncp->seq);
}

static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
{
	write_seqcount_end(&syncp->seq);
	preempt_enable_nested();
}

static inline unsigned long __u64_stats_irqsave(void)
{
	unsigned long flags;

	local_irq_save(flags);
	return flags;
}

static inline void __u64_stats_irqrestore(unsigned long flags)
{
	local_irq_restore(flags);
}

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return read_seqcount_begin(&syncp->seq);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
	return read_seqcount_retry(&syncp->seq, start);
}
#endif /* !64 bit */
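
/*
 * Initialization sketch, reusing the hypothetical foo_pcpu_stats layout from
 * above (illustrative only): every embedded syncp must be passed to
 * u64_stats_init() before first use, typically once per possible CPU:
 *
 *	int cpu;
 *
 *	foo_stats = alloc_percpu(struct foo_pcpu_stats);
 *	if (!foo_stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		u64_stats_init(&per_cpu_ptr(foo_stats, cpu)->syncp);
 */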

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
	__u64_stats_update_begin(syncp);
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
	__u64_stats_update_end(syncp);
}
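
/*
 * Writer sketch (illustrative; foo_stats and its fields are the hypothetical
 * names used above): update the current CPU's counters from a context that
 * already provides exclusion and disables preemption, e.g. BH-disabled code:
 *
 *	struct foo_pcpu_stats *s = this_cpu_ptr(foo_stats);
 *
 *	u64_stats_update_begin(&s->syncp);
 *	u64_stats_add(&s->rx_bytes, len);
 *	u64_stats_inc(&s->rx_packets);
 *	u64_stats_update_end(&s->syncp);
 */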

static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = __u64_stats_irqsave();

	__u64_stats_update_begin(syncp);
	return flags;
}

static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
						   unsigned long flags)
{
	__u64_stats_update_end(syncp);
	__u64_stats_irqrestore(flags);
}
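
/*
 * Writer sketch for the _irqsave() variant (illustrative, hypothetical names):
 * use it when another writer, or a reader, may run from IRQ context (see
 * constraint 3 above); on 64-bit it does not actually disable interrupts:
 *
 *	struct foo_pcpu_stats *s = this_cpu_ptr(foo_stats);
 *	unsigned long flags;
 *
 *	flags = u64_stats_update_begin_irqsave(&s->syncp);
 *	u64_stats_inc(&s->rx_packets);
 *	u64_stats_update_end_irqrestore(&s->syncp, flags);
 */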

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
	return __u64_stats_fetch_retry(syncp, start);
}
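
/*
 * Reader sketch (illustrative, hypothetical names): take a tear-free snapshot
 * of each CPU's counters and sum them, e.g. from a stats-collection path. Each
 * counter is read without tearing, but as noted in constraint 4 above there is
 * no consistency guarantee across several counters:
 *
 *	u64 rx_packets = 0, rx_bytes = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		const struct foo_pcpu_stats *s = per_cpu_ptr(foo_stats, cpu);
 *		unsigned int start;
 *		u64 packets, bytes;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&s->syncp);
 *			packets = u64_stats_read(&s->rx_packets);
 *			bytes = u64_stats_read(&s->rx_bytes);
 *		} while (u64_stats_fetch_retry(&s->syncp, start));
 *
 *		rx_packets += packets;
 *		rx_bytes += bytes;
 *	}
 */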

/* Obsolete interfaces */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
	return u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
	return u64_stats_fetch_retry(syncp, start);
}

#endif /* _LINUX_U64_STATS_SYNC_H */