page_pool.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMA BUF page pool system
 *
 * Copyright (C) 2020 Linaro Ltd.
 *
 * Based on the ION page pool code
 * Copyright (C) 2011 Google, Inc.
 */

#include "page_pool.h"

#include <linux/list.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/sched/signal.h>

/* page types we track in the pool */
enum {
	POOL_LOWPAGE,	/* Clean lowmem pages */
	POOL_HIGHPAGE,	/* Clean highmem pages */

	POOL_TYPE_SIZE,
};
/**
 * struct dmabuf_page_pool - pagepool struct
 * @count: array holding the number of pages of each type in the pool
 * @items: array of page lists, one per page type
 * @lock: lock protecting this struct, in particular the counts and
 *        item lists
 * @gfp_mask: gfp_mask to use when allocating pages
 * @order: order of the pages kept in the pool
 * @list: list node for the list of pools
 *
 * Allows you to keep a pool of pre-allocated pages to use.
 */
struct dmabuf_page_pool {
	int count[POOL_TYPE_SIZE];
	struct list_head items[POOL_TYPE_SIZE];
	spinlock_t lock;
	gfp_t gfp_mask;
	unsigned int order;
	struct list_head list;
};
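
/*
 * Typical use of this pool, as an illustrative sketch only (the heap driver
 * that would call these helpers is assumed, not part of this file):
 *
 *	struct dmabuf_page_pool *pool;
 *	struct page *page;
 *
 *	pool = dmabuf_page_pool_create(GFP_KERNEL, 0);
 *	page = dmabuf_page_pool_alloc(pool);   // pooled page, or fresh allocation
 *	// ... hand the page to a dma-buf ...
 *	dmabuf_page_pool_free(pool, page);     // return the page to the pool
 *	dmabuf_page_pool_destroy(pool);        // unlink the pool, free cached pages
 */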

/* All pools are kept on pool_list so the shrinker can walk them. */
static LIST_HEAD(pool_list);
static DEFINE_MUTEX(pool_list_lock);

static inline
struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
{
	if (fatal_signal_pending(current))
		return NULL;
	return alloc_pages(pool->gfp_mask, pool->order);
}

static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
					       struct page *page)
{
	__free_pages(page, pool->order);
}
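
/*
 * Pages held in the pool are accounted as NR_KERNEL_MISC_RECLAIMABLE so the
 * VM sees them as reclaimable kernel memory; the shrinker registered below
 * is what actually returns them under memory pressure.
 */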
static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
{
	int index;

	if (PageHighMem(page))
		index = POOL_HIGHPAGE;
	else
		index = POOL_LOWPAGE;

	spin_lock(&pool->lock);
	list_add_tail(&page->lru, &pool->items[index]);
	pool->count[index]++;
	spin_unlock(&pool->lock);
	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
			    1 << pool->order);
}

static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
{
	struct page *page;

	spin_lock(&pool->lock);
	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
	if (page) {
		pool->count[index]--;
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
				    -(1 << pool->order));
		goto out;
	}
	spin_unlock(&pool->lock);

out:
	return page;
}

static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
{
	struct page *page = NULL;

	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
	if (!page)
		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);

	return page;
}

struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
{
	struct page *page = NULL;

	if (WARN_ON(!pool))
		return NULL;

	page = dmabuf_page_pool_fetch(pool);
	if (!page)
		page = dmabuf_page_pool_alloc_pages(pool);

	return page;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);

void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
{
	if (WARN_ON(pool->order != compound_order(page)))
		return;

	dmabuf_page_pool_add(pool, page);
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);

static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
{
	int count = pool->count[POOL_LOWPAGE];

	if (high)
		count += pool->count[POOL_HIGHPAGE];

	return count << pool->order;
}

struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	int i;

	if (!pool)
		return NULL;

	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		pool->count[i] = 0;
		INIT_LIST_HEAD(&pool->items[i]);
	}
	pool->gfp_mask = gfp_mask | __GFP_COMP;
	pool->order = order;
	spin_lock_init(&pool->lock);

	mutex_lock(&pool_list_lock);
	list_add(&pool->list, &pool_list);
	mutex_unlock(&pool_list_lock);

	return pool;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);

void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
{
	struct page *page;
	int i;

	/* Remove us from the pool list */
	mutex_lock(&pool_list_lock);
	list_del(&pool->list);
	mutex_unlock(&pool_list_lock);

	/* Free any remaining pages in the pool */
	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		while ((page = dmabuf_page_pool_remove(pool, i)))
			dmabuf_page_pool_free_pages(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);

unsigned long dmabuf_page_pool_get_size(struct dmabuf_page_pool *pool)
{
	int i;
	unsigned long num_pages = 0;

	spin_lock(&pool->lock);
	for (i = 0; i < POOL_TYPE_SIZE; ++i)
		num_pages += pool->count[i];
	spin_unlock(&pool->lock);
	num_pages <<= pool->order; /* pool order is immutable */

	return num_pages * PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_get_size);

static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
				      int nr_to_scan)
{
	int freed = 0;
	bool high;

	if (current_is_kswapd())
		high = true;
	else
		high = !!(gfp_mask & __GFP_HIGHMEM);

	if (nr_to_scan == 0)
		return dmabuf_page_pool_total(pool, high);

	while (freed < nr_to_scan) {
		struct page *page;

		/* Try to free low pages first */
		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
		if (!page)
			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);

		if (!page)
			break;

		dmabuf_page_pool_free_pages(pool, page);
		freed += (1 << pool->order);
	}

	return freed;
}

static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
{
	struct dmabuf_page_pool *pool;
	int nr_total = 0;
	int nr_freed;
	int only_scan = 0;

	if (!nr_to_scan)
		only_scan = 1;

	mutex_lock(&pool_list_lock);
	list_for_each_entry(pool, &pool_list, list) {
		if (only_scan) {
			nr_total += dmabuf_page_pool_do_shrink(pool,
							       gfp_mask,
							       nr_to_scan);
		} else {
			nr_freed = dmabuf_page_pool_do_shrink(pool,
							      gfp_mask,
							      nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	mutex_unlock(&pool_list_lock);

	return nr_total;
}

static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
}

static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return 0;
	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
}
static struct shrinker pool_shrinker = {
	.count_objects = dmabuf_page_pool_shrink_count,
	.scan_objects = dmabuf_page_pool_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 0,
};

static int dmabuf_page_pool_init_shrinker(void)
{
	return register_shrinker(&pool_shrinker, "dmabuf-page-pool-shrinker");
}
module_init(dmabuf_page_pool_init_shrinker);

MODULE_LICENSE("GPL v2");