stats.c

// SPDX-License-Identifier: GPL-2.0
/*
 * bcache stats code
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "stats.h"
#include "btree.h"
#include "sysfs.h"

/*
 * We keep absolute totals of various statistics, and additionally a set of
 * three rolling averages.
 *
 * Every so often, a timer goes off and rescales the rolling averages.
 * The *_RESCALE constants are how many times the timer has to go off before
 * we rescale each set of numbers; that gets us half lives of 5 minutes, one
 * hour, and one day.
 *
 * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
 * and accounting_weight is what we use to rescale:
 *
 * pow(31 / 32, 22) ~= 1/2
 *
 * So that we don't have to increment each set of numbers every time we (say)
 * get a cache hit, we increment a single atomic_t in acc->collector, and when
 * the rescale function runs it resets the atomic counter to 0 and adds its
 * old value to each of the exported numbers.
 *
 * To reduce rounding error, the numbers in struct cache_stats are all
 * stored left shifted by 16, and scaled back in the sysfs show() function.
 */

static const unsigned int DAY_RESCALE          = 288;
static const unsigned int HOUR_RESCALE         = 12;
static const unsigned int FIVE_MINUTE_RESCALE  = 1;
static const unsigned int accounting_delay     = (HZ * 300) / 22;
static const unsigned int accounting_weight    = 32;

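/*
 * Spelling that arithmetic out: the timer fires every (HZ * 300) / 22
 * jiffies, i.e. roughly every 13.6 seconds, or 22 times per 5 minutes.
 * Each rescale step multiplies a counter by 31/32 (accounting_weight == 32),
 * and (31/32)^22 ~= 0.497, so 22 rescale steps halve a counter.
 * FIVE_MINUTE_RESCALE, HOUR_RESCALE and DAY_RESCALE spread those 22 steps
 * over 5 minutes, 12 * 5 minutes = 1 hour and 288 * 5 minutes = 1 day
 * respectively, giving the three half lives described above.
 */
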
/* sysfs reading/writing */

read_attribute(cache_hits);
read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);

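/*
 * show() for one set of stats: the counters are stored shifted left by 16
 * to reduce rounding error, so shift them back down before printing. The
 * hit ratio is reported as a percentage of non-bypassed lookups, and the
 * bypassed count is converted from sectors to bytes (<< 9) for hprint.
 */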
SHOW(bch_stats)
{
        struct cache_stats *s =
                container_of(kobj, struct cache_stats, kobj);
#define var(stat)               (s->stat >> 16)
        var_print(cache_hits);
        var_print(cache_misses);
        var_print(cache_bypass_hits);
        var_print(cache_bypass_misses);

        sysfs_print(cache_hit_ratio,
                    DIV_SAFE(var(cache_hits) * 100,
                             var(cache_hits) + var(cache_misses)));

        var_print(cache_miss_collisions);
        sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
        return 0;
}

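/*
 * The stats kobjects are read-only: writes are accepted and ignored, and
 * there is nothing to free on release since struct cache_stats is embedded
 * in struct cache_accounting.
 */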
STORE(bch_stats)
{
        return size;
}

static void bch_stats_release(struct kobject *k)
{
}

static struct attribute *bch_stats_attrs[] = {
        &sysfs_cache_hits,
        &sysfs_cache_misses,
        &sysfs_cache_bypass_hits,
        &sysfs_cache_bypass_misses,
        &sysfs_cache_hit_ratio,
        &sysfs_cache_miss_collisions,
        &sysfs_bypassed,
        NULL
};
ATTRIBUTE_GROUPS(bch_stats);

static KTYPE(bch_stats);

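/*
 * Register the total/five_minute/hour/day stats directories under @parent
 * in sysfs; returns the first error encountered, if any.
 */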
int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
                                   struct kobject *parent)
{
        int ret = kobject_add(&acc->total.kobj, parent,
                              "stats_total");
        ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
                                 "stats_five_minute");
        ret = ret ?: kobject_add(&acc->hour.kobj, parent,
                                 "stats_hour");
        ret = ret ?: kobject_add(&acc->day.kobj, parent,
                                 "stats_day");
        return ret;
}

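/*
 * Reset only the lifetime totals (stats_total); the rolling averages and
 * the atomic collector counters are left untouched.
 */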
void bch_cache_accounting_clear(struct cache_accounting *acc)
{
        acc->total.cache_hits = 0;
        acc->total.cache_misses = 0;
        acc->total.cache_bypass_hits = 0;
        acc->total.cache_bypass_misses = 0;
        acc->total.cache_miss_collisions = 0;
        acc->total.sectors_bypassed = 0;
}

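/*
 * Drop the sysfs kobjects and stop the rescale timer. Setting acc->closing
 * keeps scale_accounting() from re-arming the timer; whichever side actually
 * kills the timer - del_timer_sync() here, or the callback seeing closing -
 * returns the closure.
 */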
void bch_cache_accounting_destroy(struct cache_accounting *acc)
{
        kobject_put(&acc->total.kobj);
        kobject_put(&acc->five_minute.kobj);
        kobject_put(&acc->hour.kobj);
        kobject_put(&acc->day.kobj);

        atomic_set(&acc->closing, 1);
        if (del_timer_sync(&acc->timer))
                closure_return(&acc->cl);
}

/* EWMA scaling */
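
/*
 * scale_stat() performs one rescale step: ewma_add() with a zero sample and
 * weight 32 multiplies the stat by 31/32. scale_stats() is called once per
 * timer tick for each set and only performs that step every @rescale_at
 * ticks.
 */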
static void scale_stat(unsigned long *stat)
{
        *stat = ewma_add(*stat, 0, accounting_weight, 0);
}

static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
{
        if (++stats->rescale == rescale_at) {
                stats->rescale = 0;
                scale_stat(&stats->cache_hits);
                scale_stat(&stats->cache_misses);
                scale_stat(&stats->cache_bypass_hits);
                scale_stat(&stats->cache_bypass_misses);
                scale_stat(&stats->cache_miss_collisions);
                scale_stat(&stats->sectors_bypassed);
        }
}

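/*
 * Timer callback: drain the atomic collector counters into all four stats
 * sets (shifted up by 16), give each rolling set a chance to rescale, and
 * re-arm the timer unless the accounting is being torn down. The total set
 * gets a rescale threshold of 0, so it is never rescaled.
 */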
static void scale_accounting(struct timer_list *t)
{
        struct cache_accounting *acc = from_timer(acc, t, timer);

#define move_stat(name) do {                                       \
        unsigned int t = atomic_xchg(&acc->collector.name, 0);    \
        t <<= 16;                                                  \
        acc->five_minute.name += t;                                \
        acc->hour.name += t;                                       \
        acc->day.name += t;                                        \
        acc->total.name += t;                                      \
} while (0)

        move_stat(cache_hits);
        move_stat(cache_misses);
        move_stat(cache_bypass_hits);
        move_stat(cache_bypass_misses);
        move_stat(cache_miss_collisions);
        move_stat(sectors_bypassed);

        scale_stats(&acc->total, 0);
        scale_stats(&acc->day, DAY_RESCALE);
        scale_stats(&acc->hour, HOUR_RESCALE);
        scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);

        acc->timer.expires += accounting_delay;

        if (!atomic_read(&acc->closing))
                add_timer(&acc->timer);
        else
                closure_return(&acc->cl);
}

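/*
 * Bump the hit or miss counter in a collector, split by whether the request
 * bypassed the cache.
 */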
static void mark_cache_stats(struct cache_stat_collector *stats,
                             bool hit, bool bypass)
{
        if (!bypass) {
                if (hit)
                        atomic_inc(&stats->cache_hits);
                else
                        atomic_inc(&stats->cache_misses);
        } else {
                if (hit)
                        atomic_inc(&stats->cache_bypass_hits);
                else
                        atomic_inc(&stats->cache_bypass_misses);
        }
}

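/*
 * The bch_mark_* helpers account each event twice: once against the backing
 * device and once against the cache set it is attached to.
 */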
void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
                               bool hit, bool bypass)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);

        mark_cache_stats(&dc->accounting.collector, hit, bypass);
        mark_cache_stats(&c->accounting.collector, hit, bypass);
}

void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);

        atomic_inc(&dc->accounting.collector.cache_miss_collisions);
        atomic_inc(&c->accounting.collector.cache_miss_collisions);
}

void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
                               int sectors)
{
        atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
        atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}

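/*
 * Initialize the four stats kobjects (they are added to sysfs later by
 * bch_cache_accounting_add_kobjs()), hook the closure up to @parent and
 * start the rescale timer.
 */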
void bch_cache_accounting_init(struct cache_accounting *acc,
                               struct closure *parent)
{
        kobject_init(&acc->total.kobj, &bch_stats_ktype);
        kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
        kobject_init(&acc->hour.kobj, &bch_stats_ktype);
        kobject_init(&acc->day.kobj, &bch_stats_ktype);

        closure_init(&acc->cl, parent);
        timer_setup(&acc->timer, scale_accounting, 0);
        acc->timer.expires = jiffies + accounting_delay;
        add_timer(&acc->timer);
}