/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
#include <linux/xarray.h>

struct mem_cgroup;

/* list_lru_walk_cb has to always return one of those */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};
struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup per node lists, indexed by node id */
	struct list_lru_one	node[];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
	bool			memcg_aware;
	struct xarray		xa;
#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)
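/*
 * Illustrative sketch (not part of this header's API): a caller typically
 * embeds a struct list_lru in its own state, picks one of the init variants
 * above, and pairs it with list_lru_destroy() on teardown. The demo_lru and
 * demo_shrinker names below are hypothetical; a matching shrinker sketch
 * appears further down in this file.
 *
 *	static struct list_lru demo_lru;
 *	static struct shrinker demo_shrinker;
 *
 *	static int __init demo_init(void)
 *	{
 *		// memcg-aware variant; use list_lru_init() for a plain lru
 *		return list_lru_init_memcg(&demo_lru, &demo_shrinker);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		list_lru_destroy(&demo_lru);
 *	}
 */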
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp);
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function does nothing and
 * returns. Therefore the caller does not need to keep state about whether or
 * not the element already belongs on a list and is allowed to update it
 * lazily. Note however that this is valid for *a* list, not *this* list. If
 * the caller organizes itself in a way that elements can be on more than one
 * type of list, it is up to the caller to fully remove the item from the
 * previous list (with list_lru_del() for instance) before moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to a list
 * are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
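/*
 * Illustrative sketch (hypothetical demo_object type and demo_lru instance):
 * because both helpers report whether the list was actually updated, callers
 * can lazily add an object whenever it becomes unused and delete it when the
 * object is reused or freed, without tracking list membership themselves.
 *
 *	struct demo_object {
 *		struct list_head	lru;	// linked into demo_lru
 *		spinlock_t		lock;
 *		bool			in_use;
 *	};
 *
 *	static void demo_object_unused(struct demo_object *obj)
 *	{
 *		// no-op if obj is already on the list
 *		list_lru_add(&demo_lru, &obj->lru);
 *	}
 *
 *	static void demo_object_free(struct demo_object *obj)
 *	{
 *		// no-op if obj was never added or was already isolated
 *		list_lru_del(&demo_lru, &obj->lru);
 *		kfree(obj);
 *	}
 */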
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing of freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
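/*
 * Illustrative sketch of an @isolate callback (hypothetical demo_object,
 * with the obj->lock and obj->in_use fields assumed from the earlier sketch).
 * The callback runs with the per-node lru lock held, decides the fate of one
 * item and reports it via enum lru_status; removed items are typically moved
 * to a private dispose list and freed after the walk returns.
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct demo_object *obj = container_of(item,
 *						struct demo_object, lru);
 *
 *		if (!spin_trylock(&obj->lock))
 *			return LRU_SKIP;
 *
 *		if (obj->in_use) {
 *			// still referenced: keep it, give it another pass
 *			spin_unlock(&obj->lock);
 *			return LRU_ROTATE;
 *		}
 *
 *		// take the item off this lru while the lru lock is held
 *		list_lru_isolate_move(list, item, dispose);
 *		spin_unlock(&obj->lock);
 *		return LRU_REMOVED;
 *	}
 */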
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing of freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);
static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}
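/*
 * Illustrative sketch of wiring a list_lru into a shrinker (hypothetical
 * demo_* names, reusing demo_lru and demo_isolate from the sketches above;
 * registration of the shrinker itself is omitted). The shrink_control passed
 * in by the shrinker core already carries the node and memcg to target,
 * which is exactly what the helpers above consume.
 *
 *	static unsigned long demo_count(struct shrinker *shrink,
 *					struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 *
 *	static unsigned long demo_scan(struct shrinker *shrink,
 *				       struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&demo_lru, sc, demo_isolate,
 *					     &dispose);
 *		// free everything collected on the dispose list here
 *		return freed;
 *	}
 *
 *	static struct shrinker demo_shrinker = {
 *		.count_objects	= demo_count,
 *		.scan_objects	= demo_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 *	};
 */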
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}

#endif /* _LRU_LIST_H */