cma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <[email protected]>
 *	Michal Nazarewicz <[email protected]>
 *	Aneesh Kumar K.V <[email protected]>
 *	Joonsoo Kim <[email protected]>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>

#include "cma.h"
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(cma_alloc_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(cma_alloc_finish);

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
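
/*
 * Worked example of the bitmap arithmetic above (illustrative values only,
 * not taken from any real platform): with order_per_bit == 2, each bitmap
 * bit covers 4 pages. For an allocation request with align_order == 5:
 *
 *	cma_bitmap_aligned_mask()	-> (1UL << (5 - 2)) - 1 = 7
 *	cma_bitmap_aligned_offset()	-> for base_pfn == 0x12345,
 *					   (0x12345 & 31) >> 2 = 5 >> 2 = 1
 *	cma_bitmap_pages_to_bits()	-> for count == 10 pages,
 *					   ALIGN(10, 4) >> 2 = 12 >> 2 = 3 bits
 *
 * i.e. the allocator's bitmap search looks for 3 consecutive zero bits whose
 * position, adjusted by the offset, satisfies the alignment mask.
 */
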
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
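
/*
 * Example (illustrative only, not part of this file): early boot code that
 * has already carved out a memblock-reserved region can hand it to CMA.
 * The base/size values and the "example" name below are hypothetical.
 *
 *	struct cma *cma;
 *	phys_addr_t base = 0x80000000, size = SZ_64M;
 *	int ret;
 *
 *	memblock_reserve(base, size);
 *	ret = cma_init_reserved_mem(base, size, 0, "example", &cma);
 *	if (ret)
 *		pr_warn("example CMA region rejected: %d\n", ret);
 *
 * Both base and size must be aligned to CMA_MIN_ALIGNMENT_BYTES, or the
 * call fails with -EINVAL as checked above.
 */
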
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creating custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
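
/*
 * Example (illustrative, not part of this file): arch setup code reserving
 * a 128 MiB CMA area anywhere in memory, on any node. The "example" name
 * and the size are hypothetical.
 *
 *	static struct cma *example_cma;
 *
 *	void __init example_cma_reserve(void)
 *	{
 *		int ret = cma_declare_contiguous_nid(0, SZ_128M, 0, 0, 0,
 *						     false, "example",
 *						     &example_cma,
 *						     NUMA_NO_NODE);
 *		if (ret)
 *			pr_warn("example CMA reservation failed: %d\n", ret);
 *	}
 *
 * Passing base == 0 and fixed == false lets memblock pick the placement,
 * subject to the zone and alignment rules enforced above.
 */
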
#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * __cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the cma allocation.
 *
 * This function is the same as cma_alloc(), but it also takes a gfp_mask.
 * Currently, the gfp_mask supports only __GFP_NOWARN and __GFP_NORETRY;
 * if other flags are passed, the allocation fails.
 */
struct page *__cma_alloc(struct cma *cma, unsigned long count,
			 unsigned int align, gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 10;
	const char *name = cma ? cma->name : NULL;

	trace_cma_alloc_start(name, count, align);

	if (WARN_ON_ONCE((gfp_mask & GFP_KERNEL) == 0 ||
		(gfp_mask & ~(GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY)) != 0))
		goto out;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %lu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		goto out;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				spin_unlock_irq(&cma->lock);

				if (fatal_signal_pending(current) ||
				    (gfp_mask & __GFP_NORETRY))
					break;

				/*
				 * Page may be momentarily pinned by some other
				 * process which has been scheduled out, e.g.
				 * in exit path, during unmap call, or process
				 * fork and so cannot be freed there. Sleep
				 * for 100ms and retry the allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				spin_unlock_irq(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	trace_cma_alloc_finish(name, pfn, page, count, align);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
EXPORT_SYMBOL_GPL(__cma_alloc);
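
/*
 * Example (illustrative): a caller that prefers to fail fast rather than
 * sleep and retry can pass __GFP_NORETRY, plus __GFP_NOWARN to suppress the
 * failure message. The cma pointer and page count are hypothetical.
 *
 *	struct page *page;
 *
 *	page = __cma_alloc(my_cma, 16, 0,
 *			   GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 *	if (!page)
 *		... fall back to a non-contiguous allocation ...
 *
 * Any gfp flag outside GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY trips the
 * WARN_ON_ONCE() above and the allocation returns NULL.
 */
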
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	return __cma_alloc(cma, count, align, GFP_KERNEL |
			   (no_warn ? __GFP_NOWARN : 0));
}
EXPORT_SYMBOL_GPL(cma_alloc);

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);
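
/*
 * Example (illustrative): a driver-style allocate/use/free cycle against a
 * previously declared area. "my_cma" and the 64-page request are
 * hypothetical.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(my_cma, 64, get_order(SZ_64K), false);
 *	if (!page)
 *		return -ENOMEM;
 *
 *	... use the 64 physically contiguous pages starting at page ...
 *
 *	if (!cma_release(my_cma, page, 64))
 *		pr_warn("pages did not come from this CMA area\n");
 *
 * The count passed to cma_release() must match the count passed to
 * cma_alloc(); the bitmap is cleared with exactly that length.
 */
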
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);
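
/*
 * Example (illustrative): walking every registered area with a callback.
 * The callback name and the size threshold are hypothetical; returning a
 * non-zero value stops the walk early and is propagated to the caller.
 *
 *	static int find_big_area(struct cma *cma, void *data)
 *	{
 *		struct cma **out = data;
 *
 *		if (cma_get_size(cma) >= SZ_256M) {
 *			*out = cma;
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 *	struct cma *big = NULL;
 *
 *	if (cma_for_each_area(find_big_area, &big))
 *		pr_info("found %s\n", cma_get_name(big));
 */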