deferred-free-helper.c 3.1 KB

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Deferred dmabuf freeing helper
  4. *
  5. * Copyright (C) 2020 Linaro, Ltd.
  6. *
  7. * Based on the ION page pool code
  8. * Copyright (C) 2011 Google, Inc.
  9. */
  10. #include <linux/freezer.h>
  11. #include <linux/list.h>
  12. #include <linux/slab.h>
  13. #include <linux/swap.h>
  14. #include <linux/sched/signal.h>
  15. #include "deferred-free-helper.h"
/* Items queued for deferred freeing; guarded by free_list_lock. */
static LIST_HEAD(free_list);
/* Total pages currently accounted on free_list; guarded by free_list_lock. */
static size_t list_nr_pages;
/* Wakes the worker thread when items are queued. */
/* NOTE(review): not static — confirm whether the header intentionally exports this. */
wait_queue_head_t freelist_waitqueue;
/* Background kthread that drains free_list. */
/* NOTE(review): not static — confirm whether the header intentionally exports this. */
struct task_struct *freelist_task;
/* Protects free_list and list_nr_pages. */
static DEFINE_SPINLOCK(free_list_lock);
  21. void deferred_free(struct deferred_freelist_item *item,
  22. void (*free)(struct deferred_freelist_item*,
  23. enum df_reason),
  24. size_t nr_pages)
  25. {
  26. unsigned long flags;
  27. INIT_LIST_HEAD(&item->list);
  28. item->nr_pages = nr_pages;
  29. item->free = free;
  30. spin_lock_irqsave(&free_list_lock, flags);
  31. list_add(&item->list, &free_list);
  32. list_nr_pages += nr_pages;
  33. spin_unlock_irqrestore(&free_list_lock, flags);
  34. wake_up(&freelist_waitqueue);
  35. }
  36. EXPORT_SYMBOL_GPL(deferred_free);
  37. static size_t free_one_item(enum df_reason reason)
  38. {
  39. unsigned long flags;
  40. size_t nr_pages;
  41. struct deferred_freelist_item *item;
  42. spin_lock_irqsave(&free_list_lock, flags);
  43. if (list_empty(&free_list)) {
  44. spin_unlock_irqrestore(&free_list_lock, flags);
  45. return 0;
  46. }
  47. item = list_first_entry(&free_list, struct deferred_freelist_item, list);
  48. list_del(&item->list);
  49. nr_pages = item->nr_pages;
  50. list_nr_pages -= nr_pages;
  51. spin_unlock_irqrestore(&free_list_lock, flags);
  52. item->free(item, reason);
  53. return nr_pages;
  54. }
  55. static unsigned long get_freelist_nr_pages(void)
  56. {
  57. unsigned long nr_pages;
  58. unsigned long flags;
  59. spin_lock_irqsave(&free_list_lock, flags);
  60. nr_pages = list_nr_pages;
  61. spin_unlock_irqrestore(&free_list_lock, flags);
  62. return nr_pages;
  63. }
/* Shrinker count callback: report how many pages are reclaimable. */
static unsigned long freelist_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	return get_freelist_nr_pages();
}
  69. static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
  70. struct shrink_control *sc)
  71. {
  72. unsigned long total_freed = 0;
  73. if (sc->nr_to_scan == 0)
  74. return 0;
  75. while (total_freed < sc->nr_to_scan) {
  76. size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
  77. if (!pages_freed)
  78. break;
  79. total_freed += pages_freed;
  80. }
  81. return total_freed;
  82. }
/* Hooks the deferred free list into memory-pressure reclaim. */
static struct shrinker freelist_shrinker = {
	.count_objects = freelist_shrink_count,
	.scan_objects = freelist_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	/* 0 == let the reclaim core choose its default batch size. */
	.batch = 0,
};
  89. static int deferred_free_thread(void *data)
  90. {
  91. while (true) {
  92. wait_event_freezable(freelist_waitqueue,
  93. get_freelist_nr_pages() > 0);
  94. free_one_item(DF_NORMAL);
  95. }
  96. return 0;
  97. }
  98. static int deferred_freelist_init(void)
  99. {
  100. list_nr_pages = 0;
  101. init_waitqueue_head(&freelist_waitqueue);
  102. freelist_task = kthread_run(deferred_free_thread, NULL,
  103. "%s", "dmabuf-deferred-free-worker");
  104. if (IS_ERR(freelist_task)) {
  105. pr_err("Creating thread for deferred free failed\n");
  106. return -1;
  107. }
  108. sched_set_normal(freelist_task, 19);
  109. return register_shrinker(&freelist_shrinker, "dmabuf-deferred-free-shrinker");
  110. }
  111. module_init(deferred_freelist_init);
  112. MODULE_LICENSE("GPL v2");