list_lru.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"
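
/*
 * Overview: a struct list_lru keeps one list_lru_node per NUMA node, each
 * protected by its own spinlock.  When the LRU is memcg aware
 * (CONFIG_MEMCG_KMEM), every memcg additionally gets a struct list_lru_memcg
 * stored in lru->xa, indexed by its kmemcg_id, so items are tracked per
 * (node, memcg) pair and can be reclaimed by the matching memcg-aware
 * shrinker.
 */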

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &memcg_list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	if (!list_lru_memcg_aware(lru))
		return;

	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	if (list_lru_memcg_aware(lru) && idx >= 0) {
		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

		return mlru ? &mlru->node[nid] : NULL;
	}
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!list_lru_memcg_aware(lru))
		goto out;

	memcg = mem_cgroup_from_slab_obj(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
	return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */
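
/*
 * list_lru_add() and list_lru_del() route @item to the list for the NUMA node
 * the object lives on and, for memcg-aware LRUs, to the sublist of the memcg
 * that owns the backing slab object.  Only items that are currently unlisted
 * (list_empty()) are added, and only listed items are deleted; both return
 * true if the list was updated.
 */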

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(lru, nid, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
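
/*
 * The two isolate helpers below are meant to be used from a walk callback,
 * i.e. with the per-node lru lock already held, to take an item off its list
 * (or move it to a caller-private dispose list) without touching that lock.
 */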

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
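
/*
 * The counters below are read locklessly (only under RCU for the memcg
 * lookup), so the returned values are approximate and may be momentarily
 * stale; negative transients are clamped to zero.
 */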

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_one *l;
	long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
	count = l ? READ_ONCE(l->nr_items) : 0;
	rcu_read_unlock();

	if (unlikely(count < 0))
		count = 0;

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

restart:
	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
	if (!l)
		goto out;

	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			fallthrough;
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
out:
	return isolated;
}
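
/*
 * The walkers below call an isolate callback on each item with the per-node
 * lru lock held; the callback reports what it did via enum lru_status.  A
 * rough, hypothetical sketch of such a callback ("my_object", "my_can_evict"
 * and the dispose list are made-up names):
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *lru,
 *					  spinlock_t *lru_lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_object *obj = container_of(item, struct my_object,
 *						     lru_link);
 *
 *		if (!my_can_evict(obj))
 *			return LRU_ROTATE;	// keep it, move to list tail
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */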

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
				  cb_arg, nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		struct list_lru_memcg *mlru;
		unsigned long index;

		xa_for_each(&lru->xa, index, mlru) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(lru, nid, index,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
#endif

	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
	int nid;
	struct list_lru_memcg *mlru;

	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
	if (!mlru)
		return NULL;

	for_each_node(nid)
		init_one_lru(&mlru->node[nid]);

	return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

	/*
	 * The __list_lru_walk_one() can walk the list of this node.
	 * We need kvfree_rcu() here. And the walking of the list
	 * is under lru->node[nid]->lock, which can serve as a RCU
	 * read-side critical section.
	 */
	if (mlru)
		kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	if (memcg_aware)
		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
	lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	XA_STATE(xas, &lru->xa, 0);
	struct list_lru_memcg *mlru;

	if (!list_lru_memcg_aware(lru))
		return;

	xas_lock_irq(&xas);
	xas_for_each(&xas, mlru, ULONG_MAX) {
		kfree(mlru);
		xas_store(&xas, NULL);
	}
	xas_unlock_irq(&xas);
}

static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
					 int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(lru, nid, src_idx);
	if (!src)
		goto out;
	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

	list_splice_init(&src->list, &dst->list);

	if (src->nr_items) {
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
		src->nr_items = 0;
	}
out:
	spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
				    int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	for_each_node(i)
		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

	memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
	struct cgroup_subsys_state *css;
	struct list_lru *lru;
	int src_idx = memcg->kmemcg_id;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent.
	 *
	 * After we have finished, all list_lrus corresponding to this cgroup
	 * are guaranteed to remain empty. So we can safely free this cgroup's
	 * list lrus in memcg_list_lru_free().
	 *
	 * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc()
	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
	 * call.
	 */
	rcu_read_lock();
	css_for_each_descendant_pre(css, &memcg->css) {
		struct mem_cgroup *child;

		child = mem_cgroup_from_css(css);
		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
	}
	rcu_read_unlock();

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &memcg_list_lrus, list)
		memcg_reparent_list_lru(lru, src_idx, parent);
	mutex_unlock(&list_lrus_mutex);
}

static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
					    struct list_lru *lru)
{
	int idx = memcg->kmemcg_id;

	return idx < 0 || xa_load(&lru->xa, idx);
}
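
/*
 * Ensure that @memcg, and (because entries can be reparented) all of its
 * ancestors, have a list_lru_memcg allocated for @lru.  Allocation happens
 * lazily here, typically from the slab allocation/charge path when an object
 * that may end up on this LRU is created, rather than up front for every
 * cgroup, so cgroups that never touch a given LRU do not pay for it.
 */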

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp)
{
	int i;
	unsigned long flags;
	struct list_lru_memcg_table {
		struct list_lru_memcg *mlru;
		struct mem_cgroup *memcg;
	} *table;
	XA_STATE(xas, &lru->xa, 0);

	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
		return 0;

	gfp &= GFP_RECLAIM_MASK;
	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
	if (!table)
		return -ENOMEM;

	/*
	 * Because the list_lru can be reparented to the parent cgroup's
	 * list_lru, we should make sure that this cgroup and all its
	 * ancestors have allocated list_lru_memcg.
	 */
	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
		if (memcg_list_lru_allocated(memcg, lru))
			break;

		table[i].memcg = memcg;
		table[i].mlru = memcg_init_list_lru_one(gfp);
		if (!table[i].mlru) {
			while (i--)
				kfree(table[i].mlru);
			kfree(table);
			return -ENOMEM;
		}
	}

	xas_lock_irqsave(&xas, flags);
	while (i--) {
		int index = READ_ONCE(table[i].memcg->kmemcg_id);
		struct list_lru_memcg *mlru = table[i].mlru;

		xas_set(&xas, index);
retry:
		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
			kfree(mlru);
		} else {
			xas_store(&xas, mlru);
			if (xas_error(&xas) == -ENOMEM) {
				xas_unlock_irqrestore(&xas, flags);
				if (xas_nomem(&xas, gfp))
					xas_set_err(&xas, 0);
				xas_lock_irqsave(&xas, flags);
				/*
				 * The xas lock has been released, this memcg
				 * can be reparented before us. So reload
				 * memcg id. More details see the comments
				 * in memcg_reparent_list_lrus().
				 */
				index = READ_ONCE(table[i].memcg->kmemcg_id);
				if (index < 0)
					xas_set_err(&xas, 0);
				else if (!xas_error(&xas) && index != xas.xa_index)
					xas_set(&xas, index);
				goto retry;
			}
		}
	}
	/* xas_nomem() is used to free memory instead of memory allocation. */
	if (xas.xa_alloc)
		xas_nomem(&xas, gfp);
	xas_unlock_irqrestore(&xas, flags);
	kfree(table);

	return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
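
/*
 * __list_lru_init() is normally reached through the wrappers in
 * <linux/list_lru.h> such as list_lru_init() and list_lru_init_memcg().
 * A rough sketch of a memcg-aware caller (hypothetical "my_lru" and
 * "my_shrinker" names):
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *	if (err)
 *		goto fail;
 *	...
 *	list_lru_destroy(&my_lru);
 */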

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	memcg_init_list_lru(lru, memcg_aware);
	list_lru_register(lru);

	return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	list_lru_unregister(lru);
	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
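
/*
 * Putting it together, a cache that drains its list_lru from a shrinker would
 * roughly look like the sketch below (hypothetical helpers; the real wrappers
 * list_lru_shrink_count()/list_lru_shrink_walk() live in <linux/list_lru.h>
 * and the my_isolate() callback is sketched above the walkers):
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &dispose);
 *		my_dispose_list(&dispose);
 *		return freed;
 *	}
 *
 * Objects enter the LRU with list_lru_add() when they become unused and leave
 * it with list_lru_del() when they are reused or freed.
 */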