  1. // SPDX-License-Identifier: MIT
  2. /*
  3. * Copyright © 2021 Intel Corporation
  4. */
  5. #include <linux/kmemleak.h>
  6. #include <linux/module.h>
  7. #include <linux/sizes.h>
  8. #include <drm/drm_buddy.h>
  9. static struct kmem_cache *slab_blocks;
/*
 * Allocate a block node of the given @order at @offset.
 *
 * The node comes zeroed from the slab cache; the offset and order are
 * packed into ->header.  Returns NULL on allocation failure.
 */
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	/*
	 * The low header bits hold the order; @offset is chunk aligned so
	 * the two fields never overlap.
	 */
	block->header = offset;
	block->header |= order;
	block->parent = parent;

	/* Catch an offset that spills into the reserved header bits. */
	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}
/* Return a block node to the slab cache. */
static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}
/*
 * Insert @block into the free list for its order, keeping each per-order
 * list sorted by block offset in ascending order.
 */
static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	/* Find the first entry with a larger offset... */
	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	/*
	 * ...and link @block just before it.  If no entry is larger the
	 * iterator has wrapped so that &node->link == head, which makes
	 * this an append at the tail.
	 */
	__list_add(&block->link, node->link.prev, &node->link);
}
  46. static void mark_allocated(struct drm_buddy_block *block)
  47. {
  48. block->header &= ~DRM_BUDDY_HEADER_STATE;
  49. block->header |= DRM_BUDDY_ALLOCATED;
  50. list_del(&block->link);
  51. }
  52. static void mark_free(struct drm_buddy *mm,
  53. struct drm_buddy_block *block)
  54. {
  55. block->header &= ~DRM_BUDDY_HEADER_STATE;
  56. block->header |= DRM_BUDDY_FREE;
  57. list_insert_sorted(mm, block);
  58. }
  59. static void mark_split(struct drm_buddy_block *block)
  60. {
  61. block->header &= ~DRM_BUDDY_HEADER_STATE;
  62. block->header |= DRM_BUDDY_SPLIT;
  63. list_del(&block->link);
  64. }
/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/* One root per set bit: each root covers a power-of-two slice. */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		/* Largest power-of-two slice that still fits in @size. */
		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	/* Free only the roots that were successfully allocated. */
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleanup memory manager resources and the freelist
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		/* All allocations must have been freed by now. */
		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);
	}

	/* A mismatch here means leaked blocks or corrupted accounting. */
	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
/*
 * Split a free @block into two buddies of half the size.
 *
 * On success the children are marked free and @block becomes SPLIT.
 * Returns -ENOMEM (leaving @block untouched) if either child allocation
 * fails.
 */
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));	/* order-0 cannot split */

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	/* Right child starts half-way through the parent. */
	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}
  179. static struct drm_buddy_block *
  180. __get_buddy(struct drm_buddy_block *block)
  181. {
  182. struct drm_buddy_block *parent;
  183. parent = block->parent;
  184. if (!parent)
  185. return NULL;
  186. if (parent->left == block)
  187. return parent->right;
  188. return parent->left;
  189. }
/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	/* Exported thin wrapper around the internal helper. */
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
/*
 * Free @block and greedily merge it upwards: while the buddy is also
 * free, collapse both children and continue from the parent.  The final
 * (possibly merged) block is put back on its free list.
 */
static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		/* Buddy still allocated or split: stop merging here. */
		if (!drm_buddy_block_is_free(buddy))
			break;

		/* Unlink the buddy from its free list before freeing both. */
		list_del(&buddy->link);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	/* Account the space back before the node may merge away. */
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);
/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	/* _safe variant: freeing a block invalidates its ->link. */
	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		cond_resched();	/* lists can be long; don't hog the CPU */
	}
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
  252. static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
  253. {
  254. return s1 <= e2 && e1 >= s2;
  255. }
  256. static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
  257. {
  258. return s1 <= s2 && e1 >= e2;
  259. }
/*
 * Depth-first search for a free block of exactly @order that lies fully
 * inside [start, end).  Blocks straddling the range boundary are split
 * until a contained candidate emerges.  Returns the block, or an
 * ERR_PTR on failure (-ENOSPC when nothing fits, -ENOMEM from a failed
 * split).
 */
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
		 u64 start, u64 end,
		 unsigned int order)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;	/* work with an inclusive end from here on */

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		/* Too small to ever satisfy @order; prune this subtree. */
		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		/* Push left on top so lower offsets are visited first. */
		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
/*
 * For top-down allocation: scan every order >= @order and return the
 * free block with the highest offset, or NULL if none is available.
 * Free lists are offset-sorted, so list_last_entry() is the highest
 * candidate of each order.
 */
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
	struct drm_buddy_block *max_block = NULL, *node;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		if (!list_empty(&mm->free_list[i])) {
			node = list_last_entry(&mm->free_list[i],
					       struct drm_buddy_block,
					       link);
			if (!max_block) {
				max_block = node;
				continue;
			}

			if (drm_buddy_block_offset(node) >
			    drm_buddy_block_offset(max_block)) {
				max_block = node;
			}
		}
	}

	return max_block;
}
/*
 * Grab a free block of at least @order from the free lists (no range
 * restriction), splitting larger blocks down to @order as needed.
 * Returns the order-@order block or an ERR_PTR.
 */
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		/* Take the smallest free block that is big enough. */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

	/* Split down to the requested order, descending via the right child. */
	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	/* Merge any partial splits back before reporting the failure. */
	if (tmp != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
/*
 * Allocate every chunk inside [start, start+size) by DFS over @dfs.
 * Blocks fully contained in the range are claimed whole; straddling
 * blocks are split and their children revisited.  On success the
 * claimed blocks are appended to @blocks in ascending offset order.
 * Fails with -ENOSPC if any part of the range is already allocated.
 */
static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;	/* inclusive end */

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (!drm_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			mm->avail -= drm_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		/* Left pushed last so lower offsets are visited first. */
		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
/*
 * Seed the DFS stack with every root block and claim the whole range
 * [start, start+size) via __alloc_range().
 */
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size, blocks);
}
/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value, drivers consume *actual* size, so remaining
 * portions are unused and can be optionally freed with this function
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	/* Nothing to trim. */
	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	/* Temporarily hand the whole block back to the allocator... */
	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	/* ...and re-allocate only the leading @new_size portion of it. */
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
	if (err) {
		/* Restore the original allocation on failure. */
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_page_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() called on range limitations, which traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() called when *no* range restrictions
 * are enforced, which picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_page_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int min_order, order;
	unsigned long pages;
	LIST_HEAD(allocated);
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_page_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_page_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end)
		return __drm_buddy_alloc_range(mm, start, size, blocks);

	if (!IS_ALIGNED(size, min_page_size))
		return -EINVAL;

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

	do {
		/* Never request more than the remaining page count needs. */
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
				/* Allocate traversing within the range */
				block = alloc_range_bias(mm, start, end, order);
			else
				/* Allocate from freelist */
				block = alloc_from_freelist(mm, order, flags);

			if (!IS_ERR(block))
				break;

			/* Retry with a smaller order until min_order fails. */
			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
  599. /**
  600. * drm_buddy_block_print - print block information
  601. *
  602. * @mm: DRM buddy manager
  603. * @block: DRM buddy block
  604. * @p: DRM printer to use
  605. */
  606. void drm_buddy_block_print(struct drm_buddy *mm,
  607. struct drm_buddy_block *block,
  608. struct drm_printer *p)
  609. {
  610. u64 start = drm_buddy_block_offset(block);
  611. u64 size = drm_buddy_block_size(mm, block);
  612. drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
  613. }
  614. EXPORT_SYMBOL(drm_buddy_block_print);
/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

	/* Walk orders high to low, summarising each free list. */
	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %lluKiB", free >> 10);
		else
			drm_printf(p, "free: %lluMiB", free >> 20);

		drm_printf(p, ", pages: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);
/* Module teardown: release the block slab cache. */
static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}
/* Module setup: create the slab cache backing all block nodes. */
static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}
  654. module_init(drm_buddy_module_init);
  655. module_exit(drm_buddy_module_exit);
  656. MODULE_DESCRIPTION("DRM Buddy Allocator");
  657. MODULE_LICENSE("Dual MIT/GPL");