/* dm-cache-background-tracker.c */
/*
 * Copyright (C) 2017 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "dm-background-tracker"
  9. struct bt_work {
  10. struct list_head list;
  11. struct rb_node node;
  12. struct policy_work work;
  13. };
  14. struct background_tracker {
  15. unsigned int max_work;
  16. atomic_t pending_promotes;
  17. atomic_t pending_writebacks;
  18. atomic_t pending_demotes;
  19. struct list_head issued;
  20. struct list_head queued;
  21. struct rb_root pending;
  22. struct kmem_cache *work_cache;
  23. };
  24. struct background_tracker *btracker_create(unsigned int max_work)
  25. {
  26. struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
  27. if (!b) {
  28. DMERR("couldn't create background_tracker");
  29. return NULL;
  30. }
  31. b->max_work = max_work;
  32. atomic_set(&b->pending_promotes, 0);
  33. atomic_set(&b->pending_writebacks, 0);
  34. atomic_set(&b->pending_demotes, 0);
  35. INIT_LIST_HEAD(&b->issued);
  36. INIT_LIST_HEAD(&b->queued);
  37. b->pending = RB_ROOT;
  38. b->work_cache = KMEM_CACHE(bt_work, 0);
  39. if (!b->work_cache) {
  40. DMERR("couldn't create mempool for background work items");
  41. kfree(b);
  42. b = NULL;
  43. }
  44. return b;
  45. }
  46. EXPORT_SYMBOL_GPL(btracker_create);
  47. void btracker_destroy(struct background_tracker *b)
  48. {
  49. struct bt_work *w, *tmp;
  50. BUG_ON(!list_empty(&b->issued));
  51. list_for_each_entry_safe (w, tmp, &b->queued, list) {
  52. list_del(&w->list);
  53. kmem_cache_free(b->work_cache, w);
  54. }
  55. kmem_cache_destroy(b->work_cache);
  56. kfree(b);
  57. }
  58. EXPORT_SYMBOL_GPL(btracker_destroy);
  59. static int cmp_oblock(dm_oblock_t lhs, dm_oblock_t rhs)
  60. {
  61. if (from_oblock(lhs) < from_oblock(rhs))
  62. return -1;
  63. if (from_oblock(rhs) < from_oblock(lhs))
  64. return 1;
  65. return 0;
  66. }
  67. static bool __insert_pending(struct background_tracker *b,
  68. struct bt_work *nw)
  69. {
  70. int cmp;
  71. struct bt_work *w;
  72. struct rb_node **new = &b->pending.rb_node, *parent = NULL;
  73. while (*new) {
  74. w = container_of(*new, struct bt_work, node);
  75. parent = *new;
  76. cmp = cmp_oblock(w->work.oblock, nw->work.oblock);
  77. if (cmp < 0)
  78. new = &((*new)->rb_left);
  79. else if (cmp > 0)
  80. new = &((*new)->rb_right);
  81. else
  82. /* already present */
  83. return false;
  84. }
  85. rb_link_node(&nw->node, parent, new);
  86. rb_insert_color(&nw->node, &b->pending);
  87. return true;
  88. }
  89. static struct bt_work *__find_pending(struct background_tracker *b,
  90. dm_oblock_t oblock)
  91. {
  92. int cmp;
  93. struct bt_work *w;
  94. struct rb_node **new = &b->pending.rb_node;
  95. while (*new) {
  96. w = container_of(*new, struct bt_work, node);
  97. cmp = cmp_oblock(w->work.oblock, oblock);
  98. if (cmp < 0)
  99. new = &((*new)->rb_left);
  100. else if (cmp > 0)
  101. new = &((*new)->rb_right);
  102. else
  103. break;
  104. }
  105. return *new ? w : NULL;
  106. }
  107. static void update_stats(struct background_tracker *b, struct policy_work *w, int delta)
  108. {
  109. switch (w->op) {
  110. case POLICY_PROMOTE:
  111. atomic_add(delta, &b->pending_promotes);
  112. break;
  113. case POLICY_DEMOTE:
  114. atomic_add(delta, &b->pending_demotes);
  115. break;
  116. case POLICY_WRITEBACK:
  117. atomic_add(delta, &b->pending_writebacks);
  118. break;
  119. }
  120. }
  121. unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
  122. {
  123. return atomic_read(&b->pending_writebacks);
  124. }
  125. EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
  126. unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
  127. {
  128. return atomic_read(&b->pending_demotes);
  129. }
  130. EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
  131. static bool max_work_reached(struct background_tracker *b)
  132. {
  133. return atomic_read(&b->pending_promotes) +
  134. atomic_read(&b->pending_writebacks) +
  135. atomic_read(&b->pending_demotes) >= b->max_work;
  136. }
  137. static struct bt_work *alloc_work(struct background_tracker *b)
  138. {
  139. if (max_work_reached(b))
  140. return NULL;
  141. return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
  142. }
  143. int btracker_queue(struct background_tracker *b,
  144. struct policy_work *work,
  145. struct policy_work **pwork)
  146. {
  147. struct bt_work *w;
  148. if (pwork)
  149. *pwork = NULL;
  150. w = alloc_work(b);
  151. if (!w)
  152. return -ENOMEM;
  153. memcpy(&w->work, work, sizeof(*work));
  154. if (!__insert_pending(b, w)) {
  155. /*
  156. * There was a race, we'll just ignore this second
  157. * bit of work for the same oblock.
  158. */
  159. kmem_cache_free(b->work_cache, w);
  160. return -EINVAL;
  161. }
  162. if (pwork) {
  163. *pwork = &w->work;
  164. list_add(&w->list, &b->issued);
  165. } else
  166. list_add(&w->list, &b->queued);
  167. update_stats(b, &w->work, 1);
  168. return 0;
  169. }
  170. EXPORT_SYMBOL_GPL(btracker_queue);
  171. /*
  172. * Returns -ENODATA if there's no work.
  173. */
  174. int btracker_issue(struct background_tracker *b, struct policy_work **work)
  175. {
  176. struct bt_work *w;
  177. if (list_empty(&b->queued))
  178. return -ENODATA;
  179. w = list_first_entry(&b->queued, struct bt_work, list);
  180. list_move(&w->list, &b->issued);
  181. *work = &w->work;
  182. return 0;
  183. }
  184. EXPORT_SYMBOL_GPL(btracker_issue);
  185. void btracker_complete(struct background_tracker *b,
  186. struct policy_work *op)
  187. {
  188. struct bt_work *w = container_of(op, struct bt_work, work);
  189. update_stats(b, &w->work, -1);
  190. rb_erase(&w->node, &b->pending);
  191. list_del(&w->list);
  192. kmem_cache_free(b->work_cache, w);
  193. }
  194. EXPORT_SYMBOL_GPL(btracker_complete);
  195. bool btracker_promotion_already_present(struct background_tracker *b,
  196. dm_oblock_t oblock)
  197. {
  198. return __find_pending(b, oblock) != NULL;
  199. }
  200. EXPORT_SYMBOL_GPL(btracker_promotion_already_present);
/*----------------------------------------------------------------*/