kgsl_pool.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <asm/cacheflush.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/version.h>

#include "kgsl_debugfs.h"
#include "kgsl_device.h"
#include "kgsl_pool.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"

#ifdef CONFIG_QCOM_KGSL_SORT_POOL
struct kgsl_pool_page_entry {
        phys_addr_t physaddr;
        struct page *page;
        struct rb_node node;
};

static struct kmem_cache *addr_page_cache;

/**
 * struct kgsl_page_pool - Structure to hold information for the pool
 * @pool_order: Page order describing the size of the page
 * @page_count: Number of pages currently present in the pool
 * @reserved_pages: Number of pages reserved at init for the pool
 * @list_lock: Spinlock for page list in the pool
 * @pool_rbtree: RB tree with all pages held/reserved in this pool
 * @mempool: Mempool to pre-allocate tracking structs for pages in this pool
 * @debug_root: Pointer to the debugfs root for this pool
 * @max_pages: Limit on number of pages this pool can hold
 */
struct kgsl_page_pool {
        unsigned int pool_order;
        unsigned int page_count;
        unsigned int reserved_pages;
        spinlock_t list_lock;
        struct rb_root pool_rbtree;
        mempool_t *mempool;
        struct dentry *debug_root;
        unsigned int max_pages;
};
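
/*
 * Allocation/free callbacks for the mempool that backs the rbtree
 * page-tracking entries used when CONFIG_QCOM_KGSL_SORT_POOL is set.
 */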
static void *_pool_entry_alloc(gfp_t gfp_mask, void *arg)
{
        return kmem_cache_alloc(addr_page_cache, gfp_mask);
}

static void _pool_entry_free(void *element, void *arg)
{
        kmem_cache_free(addr_page_cache, element);
}
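
/*
 * Insert @p into the pool's rbtree, keyed by physical address, so that
 * pages are later handed out in ascending physical-address order.
 */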
static int
__kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
        struct rb_node **node, *parent = NULL;
        struct kgsl_pool_page_entry *new_page, *entry;
        gfp_t gfp_mask = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;

        new_page = pool->mempool ? mempool_alloc(pool->mempool, gfp_mask) :
                kmem_cache_alloc(addr_page_cache, gfp_mask);
        if (new_page == NULL)
                return -ENOMEM;

        spin_lock(&pool->list_lock);
        node = &pool->pool_rbtree.rb_node;
        new_page->physaddr = page_to_phys(p);
        new_page->page = p;

        while (*node != NULL) {
                parent = *node;
                entry = rb_entry(parent, struct kgsl_pool_page_entry, node);

                if (new_page->physaddr < entry->physaddr)
                        node = &parent->rb_left;
                else
                        node = &parent->rb_right;
        }

        rb_link_node(&new_page->node, parent, node);
        rb_insert_color(&new_page->node, &pool->pool_rbtree);

        /*
         * page_count may be read without the list_lock held. Use WRITE_ONCE
         * to avoid compiler optimizations that may break consistency.
         */
        ASSERT_EXCLUSIVE_WRITER(pool->page_count);
        WRITE_ONCE(pool->page_count, pool->page_count + 1);
        spin_unlock(&pool->list_lock);
        return 0;
}
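
/*
 * Remove and return the page with the lowest physical address in the
 * pool, or NULL if the pool is empty. Caller must hold pool->list_lock.
 */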
static struct page *
__kgsl_pool_get_page(struct kgsl_page_pool *pool)
{
        struct rb_node *node;
        struct kgsl_pool_page_entry *entry;
        struct page *p;

        node = rb_first(&pool->pool_rbtree);
        if (!node)
                return NULL;

        entry = rb_entry(node, struct kgsl_pool_page_entry, node);
        p = entry->page;
        rb_erase(&entry->node, &pool->pool_rbtree);

        if (pool->mempool)
                mempool_free(entry, pool->mempool);
        else
                kmem_cache_free(addr_page_cache, entry);

        /*
         * page_count may be read without the list_lock held. Use WRITE_ONCE
         * to avoid compiler optimizations that may break consistency.
         */
        ASSERT_EXCLUSIVE_WRITER(pool->page_count);
        WRITE_ONCE(pool->page_count, pool->page_count - 1);
        return p;
}

static void kgsl_pool_list_init(struct kgsl_page_pool *pool)
{
        pool->pool_rbtree = RB_ROOT;
}

static void kgsl_pool_cache_init(void)
{
        addr_page_cache = KMEM_CACHE(kgsl_pool_page_entry, 0);
}

static void kgsl_pool_cache_destroy(void)
{
        kmem_cache_destroy(addr_page_cache);
}

static void kgsl_destroy_page_pool(struct kgsl_page_pool *pool)
{
        mempool_destroy(pool->mempool);
}
#else
/**
 * struct kgsl_page_pool - Structure to hold information for the pool
 * @pool_order: Page order describing the size of the page
 * @page_count: Number of pages currently present in the pool
 * @reserved_pages: Number of pages reserved at init for the pool
 * @list_lock: Spinlock for page list in the pool
 * @page_list: List of pages held/reserved in this pool
 * @debug_root: Pointer to the debugfs root for this pool
 * @max_pages: Limit on number of pages this pool can hold
 */
struct kgsl_page_pool {
        unsigned int pool_order;
        unsigned int page_count;
        unsigned int reserved_pages;
        spinlock_t list_lock;
        struct list_head page_list;
        struct dentry *debug_root;
        unsigned int max_pages;
};
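
/*
 * Unsorted variant: track pooled pages on a simple list via page->lru,
 * appending on add and popping the head on get.
 */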
static int
__kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
        spin_lock(&pool->list_lock);
        list_add_tail(&p->lru, &pool->page_list);

        /*
         * page_count may be read without the list_lock held. Use WRITE_ONCE
         * to avoid compiler optimizations that may break consistency.
         */
        ASSERT_EXCLUSIVE_WRITER(pool->page_count);
        WRITE_ONCE(pool->page_count, pool->page_count + 1);
        spin_unlock(&pool->list_lock);
        return 0;
}

static struct page *
__kgsl_pool_get_page(struct kgsl_page_pool *pool)
{
        struct page *p;

        p = list_first_entry_or_null(&pool->page_list, struct page, lru);
        if (p) {
                /*
                 * page_count may be read without the list_lock held. Use
                 * WRITE_ONCE to avoid compiler optimizations that may break
                 * consistency.
                 */
                ASSERT_EXCLUSIVE_WRITER(pool->page_count);
                WRITE_ONCE(pool->page_count, pool->page_count - 1);
                list_del(&p->lru);
        }
        return p;
}

static void kgsl_pool_list_init(struct kgsl_page_pool *pool)
{
        INIT_LIST_HEAD(&pool->page_list);
}

static void kgsl_pool_cache_init(void)
{
}

static void kgsl_pool_cache_destroy(void)
{
}

static void kgsl_destroy_page_pool(struct kgsl_page_pool *pool)
{
}
#endif

static struct kgsl_page_pool kgsl_pools[6];
static int kgsl_num_pools;
static int kgsl_pool_max_pages;

/* Return the index of the pool for the specified order */
static int kgsl_get_pool_index(int order)
{
        int i;

        for (i = 0; i < kgsl_num_pools; i++) {
                if (kgsl_pools[i].pool_order == order)
                        return i;
        }

        return -EINVAL;
}

/* Returns the KGSL pool corresponding to the input page order */
static struct kgsl_page_pool *
_kgsl_get_pool_from_order(int order)
{
        int index = kgsl_get_pool_index(order);

        return index >= 0 ? &kgsl_pools[index] : NULL;
}

/* Add a page to the specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
        if (!p)
                return;

        /*
         * Sanity check to make sure we don't re-pool a page that
         * somebody else has a reference to.
         */
        if (WARN_ON(unlikely(page_count(p) > 1))) {
                __free_pages(p, pool->pool_order);
                return;
        }

        if (__kgsl_pool_add_page(pool, p)) {
                __free_pages(p, pool->pool_order);
                trace_kgsl_pool_free_page(pool->pool_order);
                return;
        }

        /* Use READ_ONCE to read page_count without holding list_lock */
        trace_kgsl_pool_add_page(pool->pool_order, READ_ONCE(pool->page_count));

        mod_node_page_state(page_pgdat(p), NR_KERNEL_MISC_RECLAIMABLE,
                        (1 << pool->pool_order));
}

/* Returns a page from the specified pool */
static struct page *
_kgsl_pool_get_page(struct kgsl_page_pool *pool)
{
        struct page *p = NULL;

        spin_lock(&pool->list_lock);
        p = __kgsl_pool_get_page(pool);
        spin_unlock(&pool->list_lock);

        if (p != NULL) {
                /* Use READ_ONCE to read page_count without holding list_lock */
                trace_kgsl_pool_get_page(pool->pool_order,
                                READ_ONCE(pool->page_count));
                mod_node_page_state(page_pgdat(p), NR_KERNEL_MISC_RECLAIMABLE,
                                -(1 << pool->pool_order));
        }
        return p;
}
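
/* Return the total number of pages (in single-page units) held across all pools */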
int kgsl_pool_size_total(void)
{
        int i;
        int total = 0;

        for (i = 0; i < kgsl_num_pools; i++) {
                struct kgsl_page_pool *kgsl_pool = &kgsl_pools[i];

                spin_lock(&kgsl_pool->list_lock);
                total += kgsl_pool->page_count * (1 << kgsl_pool->pool_order);
                spin_unlock(&kgsl_pool->list_lock);
        }
        return total;
}

/* Returns the total number of pages in all pools excluding reserved pages */
static unsigned long kgsl_pool_size_nonreserved(void)
{
        int i;
        unsigned long total = 0;

        for (i = 0; i < kgsl_num_pools; i++) {
                struct kgsl_page_pool *pool = &kgsl_pools[i];

                spin_lock(&pool->list_lock);
                if (pool->page_count > pool->reserved_pages)
                        total += (pool->page_count - pool->reserved_pages) *
                                        (1 << pool->pool_order);
                spin_unlock(&pool->list_lock);
        }

        return total;
}

/*
 * Returns a page from the specified pool only if the pool currently
 * holds more pages than its reserved count.
 */
static struct page *
_kgsl_pool_get_nonreserved_page(struct kgsl_page_pool *pool)
{
        struct page *p = NULL;

        spin_lock(&pool->list_lock);
        if (pool->page_count <= pool->reserved_pages) {
                spin_unlock(&pool->list_lock);
                return NULL;
        }

        p = __kgsl_pool_get_page(pool);
        spin_unlock(&pool->list_lock);

        if (p != NULL) {
                /* Use READ_ONCE to read page_count without holding list_lock */
                trace_kgsl_pool_get_page(pool->pool_order,
                                READ_ONCE(pool->page_count));
                mod_node_page_state(page_pgdat(p), NR_KERNEL_MISC_RECLAIMABLE,
                                -(1 << pool->pool_order));
        }
        return p;
}

/*
 * This will shrink the specified pool by num_pages or by
 * (page_count - reserved_pages), whichever is smaller.
 */
static unsigned int
_kgsl_pool_shrink(struct kgsl_page_pool *pool,
                unsigned int num_pages, bool exit)
{
        int j;
        unsigned int pcount = 0;
        struct page *(*get_page)(struct kgsl_page_pool *) =
                _kgsl_pool_get_nonreserved_page;

        if (pool == NULL || num_pages == 0)
                return pcount;

        num_pages = (num_pages + (1 << pool->pool_order) - 1) >>
                        pool->pool_order;

        /* This is to ensure that we free reserved pages */
        if (exit)
                get_page = _kgsl_pool_get_page;

        for (j = 0; j < num_pages; j++) {
                struct page *page = get_page(pool);

                if (!page)
                        break;

                __free_pages(page, pool->pool_order);
                pcount += (1 << pool->pool_order);
                trace_kgsl_pool_free_page(pool->pool_order);
        }

        return pcount;
}

/*
 * This function removes the number of pages specified by target_pages
 * from the total pool size.
 *
 * Remove target_pages from the pools, starting with the highest-order pool.
 */
static unsigned long
kgsl_pool_reduce(int target_pages, bool exit)
{
        int i, ret;
        unsigned long pcount = 0;

        for (i = (kgsl_num_pools - 1); i >= 0; i--) {
                if (target_pages <= 0)
                        return pcount;

                /* Remove target_pages pages from this pool */
                ret = _kgsl_pool_shrink(&kgsl_pools[i], target_pages, exit);
                target_pages -= ret;
                pcount += ret;
        }

        return pcount;
}

void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
{
        int i;

        if (!pages)
                return;

        for (i = 0; i < pcount;) {
                /*
                 * Free each page or compound page group individually.
                 */
                struct page *p = pages[i];

                i += 1 << compound_order(p);
                kgsl_pool_free_page(p);
        }
}
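
/*
 * Pick a lower order to retry with: scan the configured pools from the
 * last one down and return the first pool order that does not exceed
 * the requested order, or 0 if none qualifies.
 */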
static int kgsl_pool_get_retry_order(unsigned int order)
{
        int i;

        for (i = kgsl_num_pools - 1; i > 0; i--)
                if (order >= kgsl_pools[i].pool_order)
                        return kgsl_pools[i].pool_order;

        return 0;
}

/*
 * Return true if a pool exists for the specified page size, or if no
 * pools are configured at all; otherwise return false.
 */
static bool kgsl_pool_available(unsigned int page_size)
{
        int order = get_order(page_size);

        if (!kgsl_num_pools)
                return true;

        return (kgsl_get_pool_index(order) >= 0);
}
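
/*
 * Pick the largest pooled, power-of-two chunk size that fits within @size
 * and does not exceed the requested alignment; fall back to PAGE_SIZE.
 */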
u32 kgsl_get_page_size(size_t size, unsigned int align)
{
        u32 pool;

        for (pool = rounddown_pow_of_two(size); pool > PAGE_SIZE; pool >>= 1)
                if ((align >= ilog2(pool)) && (size >= pool) &&
                        kgsl_pool_available(pool))
                        return pool;

        return PAGE_SIZE;
}
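
/*
 * Allocate one chunk of *page_size bytes, preferably from the matching
 * pool, and store its constituent struct page pointers in @pages.
 * Returns the number of pages filled in, -ENOMEM if allocation failed,
 * or -EAGAIN after lowering *page_size and *align so the caller can
 * retry with a smaller chunk.
 */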
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
                        unsigned int pages_len, unsigned int *align,
                        struct device *dev)
{
        int j;
        int pcount = 0;
        struct kgsl_page_pool *pool;
        struct page *page = NULL;
        struct page *p = NULL;
        int order = get_order(*page_size);
        int pool_idx;
        size_t size = 0;

        if ((pages == NULL) || pages_len < (*page_size >> PAGE_SHIFT))
                return -EINVAL;

        /* If the pool is not configured get pages from the system */
        if (!kgsl_num_pools) {
                gfp_t gfp_mask = kgsl_gfp_mask(order);

                page = alloc_pages(gfp_mask, order);
                if (page == NULL) {
                        /* Retry with lower order pages */
                        if (order > 0) {
                                size = PAGE_SIZE << --order;
                                goto eagain;
                        } else
                                return -ENOMEM;
                }
                trace_kgsl_pool_alloc_page_system(order);
                goto done;
        }

        pool = _kgsl_get_pool_from_order(order);
        if (pool == NULL) {
                /* Retry with lower order pages */
                if (order > 0) {
                        size = PAGE_SIZE << kgsl_pool_get_retry_order(order);
                        goto eagain;
                } else {
                        /*
                         * Fall back to direct allocation in case
                         * pool with zero order is not present
                         */
                        gfp_t gfp_mask = kgsl_gfp_mask(order);

                        page = alloc_pages(gfp_mask, order);
                        if (page == NULL)
                                return -ENOMEM;
                        trace_kgsl_pool_alloc_page_system(order);
                        goto done;
                }
        }

        pool_idx = kgsl_get_pool_index(order);

        page = _kgsl_pool_get_page(pool);

        /* Allocate a new page if not allocated from pool */
        if (page == NULL) {
                gfp_t gfp_mask = kgsl_gfp_mask(order);

                page = alloc_pages(gfp_mask, order);
                if (!page) {
                        if (pool_idx > 0) {
                                /* Retry with lower order pages */
                                size = PAGE_SIZE <<
                                        kgsl_pools[pool_idx - 1].pool_order;
                                goto eagain;
                        } else
                                return -ENOMEM;
                }
                trace_kgsl_pool_alloc_page_system(order);
        }

done:
        kgsl_zero_page(page, order, dev);

        for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
                p = nth_page(page, j);
                pages[pcount] = p;
                pcount++;
        }

        return pcount;

eagain:
        trace_kgsl_pool_try_page_lower(get_order(*page_size));
        *page_size = kgsl_get_page_size(size, ilog2(size));
        *align = ilog2(*page_size);
        return -EAGAIN;
}

void kgsl_pool_free_page(struct page *page)
{
        struct kgsl_page_pool *pool;
        int page_order;

        if (page == NULL)
                return;

        page_order = compound_order(page);

        if (!kgsl_pool_max_pages ||
                        (kgsl_pool_size_total() < kgsl_pool_max_pages)) {
                pool = _kgsl_get_pool_from_order(page_order);
                /* Use READ_ONCE to read page_count without holding list_lock */
                if (pool && (READ_ONCE(pool->page_count) < pool->max_pages)) {
                        _kgsl_pool_add_page(pool, page);
                        return;
                }
        }

        /* Give back to system as not added to pool */
        __free_pages(page, page_order);
        trace_kgsl_pool_free_page(page_order);
}

/* Functions for the shrinker */
static unsigned long
kgsl_pool_shrink_scan_objects(struct shrinker *shrinker,
                        struct shrink_control *sc)
{
        /* sc->nr_to_scan represents the number of pages to be removed */
        unsigned long pcount = kgsl_pool_reduce(sc->nr_to_scan, false);

        /* If pools are exhausted return SHRINK_STOP */
        return pcount ? pcount : SHRINK_STOP;
}

static unsigned long
kgsl_pool_shrink_count_objects(struct shrinker *shrinker,
                        struct shrink_control *sc)
{
        /*
         * Return the non-reserved pool size as we don't
         * want the shrinker to free reserved pages.
         */
        return kgsl_pool_size_nonreserved();
}

/* Shrinker callback data */
static struct shrinker kgsl_pool_shrinker = {
        .count_objects = kgsl_pool_shrink_count_objects,
        .scan_objects = kgsl_pool_shrink_scan_objects,
        .seeks = DEFAULT_SEEKS,
        .batch = 0,
};
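
/* debugfs accessors for the per-pool reserved and current page counts */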
int kgsl_pool_reserved_get(void *data, u64 *val)
{
        struct kgsl_page_pool *pool = data;

        *val = (u64) pool->reserved_pages;
        return 0;
}

int kgsl_pool_page_count_get(void *data, u64 *val)
{
        struct kgsl_page_pool *pool = data;

        /* Use READ_ONCE to read page_count without holding list_lock */
        *val = (u64) READ_ONCE(pool->page_count);
        return 0;
}
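
/*
 * Pre-fill the pool with the number of pages requested via the
 * "qcom,mempool-reserved" DT property, capped at max_pages and at 4096.
 */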
static void kgsl_pool_reserve_pages(struct kgsl_page_pool *pool,
                struct device_node *node)
{
        u32 reserved = 0;
        int i;

        of_property_read_u32(node, "qcom,mempool-reserved", &reserved);
        reserved = min_t(u32, reserved, pool->max_pages);

        /* Limit the total number of reserved pages to 4096 */
        pool->reserved_pages = min_t(u32, reserved, 4096);

#if IS_ENABLED(CONFIG_QCOM_KGSL_SORT_POOL)
        /*
         * Pre-allocate tracking structs for reserved_pages so that
         * the pool can hold them even in low memory conditions
         */
        pool->mempool = mempool_create(pool->reserved_pages,
                        _pool_entry_alloc, _pool_entry_free, NULL);
#endif

        for (i = 0; i < pool->reserved_pages; i++) {
                gfp_t gfp_mask = kgsl_gfp_mask(pool->pool_order);
                struct page *page;

                page = alloc_pages(gfp_mask, pool->pool_order);
                _kgsl_pool_add_page(pool, page);
        }
}
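
/*
 * Configure one pool from a child node of the qcom,gpu-mempools DT node:
 * page size (pool order), optional max-pages limit, reserved pages and a
 * debugfs entry.
 */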
static int kgsl_of_parse_mempool(struct kgsl_page_pool *pool,
                struct device_node *node)
{
        u32 size;
        int order;
        unsigned char name[8];

        if (of_property_read_u32(node, "qcom,mempool-page-size", &size))
                return -EINVAL;

        order = get_order(size);

        if (order > 8) {
                pr_err("kgsl: %pOF: pool order %d is too big\n", node, order);
                return -EINVAL;
        }

        pool->pool_order = order;

        if (of_property_read_u32(node, "qcom,mempool-max-pages", &pool->max_pages))
                pool->max_pages = UINT_MAX;

        spin_lock_init(&pool->list_lock);
        kgsl_pool_list_init(pool);

        kgsl_pool_reserve_pages(pool, node);

        snprintf(name, sizeof(name), "%d_order", (pool->pool_order));
        kgsl_pool_init_debugfs(pool->debug_root, name, (void *) pool);

        return 0;
}
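
/*
 * Walk the qcom,gpu-mempools device tree node, set up each child pool
 * and register the pool shrinker.
 */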
void kgsl_probe_page_pools(void)
{
        struct device_node *node, *child;
        int index = 0;

        node = of_find_compatible_node(NULL, NULL, "qcom,gpu-mempools");
        if (!node)
                return;

        /* Get the maximum number of pages allowed across all pools */
        of_property_read_u32(node, "qcom,mempool-max-pages",
                        &kgsl_pool_max_pages);

        kgsl_pool_cache_init();

        for_each_child_of_node(node, child) {
                if (!kgsl_of_parse_mempool(&kgsl_pools[index], child))
                        index++;

                if (index == ARRAY_SIZE(kgsl_pools)) {
                        of_node_put(child);
                        break;
                }
        }

        kgsl_num_pools = index;
        of_node_put(node);

        /* Initialize the shrinker */
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
        register_shrinker(&kgsl_pool_shrinker, "kgsl_pool_shrinker");
#else
        register_shrinker(&kgsl_pool_shrinker);
#endif
}
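
/* Tear down all pools: drain every page back to the system and unregister the shrinker */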
void kgsl_exit_page_pools(void)
{
        int i;

        /* Release all pages in pools, if any */
        kgsl_pool_reduce(INT_MAX, true);

        /* Unregister the shrinker */
        unregister_shrinker(&kgsl_pool_shrinker);

        /* Destroy helper structures */
        for (i = 0; i < kgsl_num_pools; i++)
                kgsl_destroy_page_pool(&kgsl_pools[i]);

        /* Destroy the kmem cache */
        kgsl_pool_cache_destroy();
}