percpu_counter.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
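
/*
 * A minimal lifecycle sketch, assuming a hypothetical caller with its own
 * counter ("nr_widgets", "widgets_init") and a context where GFP_KERNEL
 * allocation is allowed:
 *
 *	struct percpu_counter nr_widgets;
 *
 *	int widgets_init(void)
 *	{
 *		int err = percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *		percpu_counter_inc(&nr_widgets);
 *		pr_info("approx widgets: %lld\n",
 *			percpu_counter_read_positive(&nr_widgets));
 *		percpu_counter_destroy(&nr_widgets);
 *		return 0;
 *	}
 */
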
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
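
/*
 * Fast-path sketch, assuming a hypothetical "nr_dirty" counter: each call
 * only touches this CPU's slot until the local delta reaches
 * percpu_counter_batch, at which point it is folded into fbc->count under
 * the spinlock.
 *
 *	percpu_counter_add(&nr_dirty, 1);
 *
 * The value seen by percpu_counter_read() can therefore lag by roughly
 * percpu_counter_batch * num_online_cpus(); use percpu_counter_compare()
 * or percpu_counter_sum() when that drift matters.
 */
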
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter rather than in fbc->count
 * until the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes
 * counter writes efficient.
 * However, percpu_counter_sum(), instead of percpu_counter_read(), must be
 * used to add up the counts from each CPU and account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
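
/*
 * Usage sketch for the update-heavy, read-rarely case, assuming a
 * hypothetical per-mount "events" counter (sbi->events): the hot path only
 * touches the local per-CPU slot, and the rare reader pays for the exact
 * sum.
 *
 *	percpu_counter_add_local(&sbi->events, 1);	// hot path
 *
 *	// rare reporting path; percpu_counter_read() would miss the counts
 *	// still sitting in the per-CPU slots, so sum across CPUs instead:
 *	s64 total = percpu_counter_sum(&sbi->events);
 */
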
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);

	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
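
/*
 * Read-side sketch, assuming a hypothetical free-space counter
 * (sbi->free_blocks): the approximate, lockless read is fine for
 * heuristics and reporting, while the exact sum is reserved for decisions
 * that must not see the per-CPU drift.
 *
 *	s64 approx = percpu_counter_read_positive(&sbi->free_blocks);
 *	s64 exact  = percpu_counter_sum_positive(&sbi->free_blocks);
 */
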
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}

#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}
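
/*
 * Sketch of a check-then-consume pattern built on these helpers, in the
 * spirit of filesystem free-block accounting; "sbi->free_blocks" and "nr"
 * are hypothetical, and a real caller serializes or tolerates the race
 * between the check and the subtract:
 *
 *	if (percpu_counter_compare(&sbi->free_blocks, nr) < 0)
 *		return -ENOSPC;		// definitely not enough left
 *	percpu_counter_sub(&sbi->free_blocks, nr);
 *
 * percpu_counter_compare() only falls back to the expensive exact sum when
 * the approximate count is within batch * num_online_cpus() of "nr", so
 * the common case stays cheap.
 */
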
#endif /* _LINUX_PERCPU_COUNTER_H */