// SPDX-License-Identifier: GPL-2.0-only
/*
 * zbud.c
 *
 * Copyright (C) 2013, Seth Jennings, IBM
 *
 * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
 *
 * zbud is a special purpose allocator for storing compressed pages. Contrary
 * to what its name may suggest, zbud is not a buddy allocator, but rather an
 * allocator that "buddies" two compressed pages together in a single memory
 * page.
 *
 * While this design limits storage density, it has simple and deterministic
 * reclaim properties that make it preferable to a higher density approach when
 * reclaim will be used.
 *
 * zbud works by storing compressed pages, or "zpages", together in pairs in a
 * single memory page called a "zbud page". The first buddy is "left
 * justified" at the beginning of the zbud page, and the last buddy is "right
 * justified" at the end of the zbud page. The benefit is that if either
 * buddy is freed, the freed buddy space, coalesced with whatever slack space
 * existed between the buddies, results in the largest possible free region
 * within the zbud page.
 *
 * zbud also provides an attractive lower bound on density. The ratio of zpages
 * to zbud pages cannot be less than 1. This ensures that zbud can never "do
 * harm" by using more pages to store zpages than the uncompressed zpages would
 * have used on their own.
 *
 * zbud pages are divided into "chunks". The size of the chunks is fixed at
 * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
 * into chunks allows organizing unbuddied zbud pages into a manageable number
 * of unbuddied lists according to the number of free chunks available in the
 * zbud page.
 *
 * The zbud API differs from that of conventional allocators in that the
 * allocation function, zbud_alloc(), returns an opaque handle to the user,
 * not a dereferenceable pointer. The user must map the handle using
 * zbud_map() in order to get a usable pointer by which to access the
 * allocation data and unmap the handle with zbud_unmap() when operations
 * on the allocation data are complete.
 */
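/*
 * For illustration only, a minimal sketch of how a hypothetical caller might
 * drive this API end to end. The evict callback, buffer, and size below are
 * invented for the example (and error handling is elided); note that in this
 * file the functions are static and are reached through the zpool layer at
 * the bottom, so this only shows the calling convention:
 *
 *	static int my_evict(struct zbud_pool *pool, unsigned long handle)
 *	{
 *		return 0;	// pretend the zpage was written back
 *	}
 *
 *	static const struct zbud_ops my_ops = { .evict = my_evict };
 *
 *	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL, &my_ops);
 *	unsigned long handle;
 *	char zpage[512] = { 0 };	// stand-in for compressed data
 *
 *	if (!zbud_alloc(pool, sizeof(zpage), GFP_KERNEL, &handle)) {
 *		void *dst = zbud_map(pool, handle);
 *
 *		memcpy(dst, zpage, sizeof(zpage));
 *		zbud_unmap(pool, handle);
 *		zbud_free(pool, handle);
 *	}
 *	zbud_destroy_pool(pool);
 */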
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
/*****************
 * Structures
 *****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
 * in each allocated page is occupied by the zbud header, NCHUNKS works out to
 * 63, which is the maximum number of free chunks in a zbud page; there are
 * also 63 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
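/*
 * A worked example, assuming a 4 KiB page (PAGE_SHIFT == 12); the page size
 * is an assumption for illustration, not something the code requires:
 *
 *	CHUNK_SHIFT       = 12 - 6 = 6
 *	CHUNK_SIZE        = 1 << 6 = 64 bytes
 *	ZHDR_SIZE_ALIGNED = 64 bytes (the header's chunk)
 *	NCHUNKS           = (4096 - 64) >> 6 = 63 chunks
 */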
struct zbud_pool;

struct zbud_ops {
	int (*evict)(struct zbud_pool *pool, unsigned long handle);
};

/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and first|last_chunks fields of any
 *		zbud page in the pool
 * @unbuddied:	array of lists tracking zbud pages that contain only one buddy;
 *		the list a zbud page is added to depends on the size of its
 *		free region.
 * @buddied:	list tracking the zbud pages that contain two buddies;
 *		these zbud pages are full
 * @lru:	list tracking the zbud pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of zbud pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular zbud pool.
 */
struct zbud_pool {
	spinlock_t lock;
	union {
		/*
		 * Reuse unbuddied[0] as buddied on the ground that
		 * unbuddied[0] is unused.
		 */
		struct list_head buddied;
		struct list_head unbuddied[NCHUNKS];
	};
	struct list_head lru;
	u64 pages_nr;
	const struct zbud_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
};
/*
 * struct zbud_header - zbud page metadata occupying the first chunk of each
 *			zbud page.
 * @buddy:	links the zbud page into the unbuddied/buddied lists in the pool
 * @lru:	links the zbud page into the lru list in the pool
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @under_reclaim:	true while the zbud page is being reclaimed; tells
 *			zbud_free() to defer freeing the page to the reclaimer
 */
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};
/*****************
 * Helpers
 *****************/
/* Just to make the code easier to read */
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
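/*
 * For example, with 64-byte chunks (the 4 KiB page case, an illustrative
 * assumption), a 400-byte allocation rounds up to
 * size_to_chunks(400) == (400 + 63) >> 6 == 7 chunks.
 */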
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);

	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_LIST_HEAD(&zhdr->lru);
	zhdr->under_reclaim = false;
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}
/*
 * Encodes the handle of a particular buddy within a zbud page
 * Pool lock should be held as this function accesses first|last_chunks
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle is actually just the pointer to the data
	 * but this might not always be the case. A little information hiding.
	 * Add CHUNK_SIZE to the handle if it is the first allocation to jump
	 * over the zbud header in the first chunk.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}
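/*
 * To make the encoding concrete, assume 4 KiB pages and a zbud page at the
 * made-up address 0xffff888012345000. The FIRST handle is then
 * 0xffff888012345040 (base plus the 64-byte header chunk), and the LAST
 * handle for a 7-chunk buddy is base + 4096 - 7*64 = 0xffff888012345e40.
 * Masking either handle with PAGE_MASK in handle_to_zbud_header() recovers
 * the zbud_header at 0xffff888012345000.
 */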
/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact that
	 * free buddies have a length of zero to simplify everything.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
/*****************
 * API Functions
 *****************/

/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp:	gfp flags when allocating the zbud pool structure
 * @ops:	user-defined operations for the zbud pool
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */
static struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
{
	struct zbud_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}
/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}
/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size is too large to ever fit in
 * a zbud page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);

	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find unbuddied zbud page, create new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}
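/*
 * To make the list bookkeeping concrete: assuming 4 KiB pages (so NCHUNKS ==
 * 63, an illustrative assumption), a zbud page holding only a 7-chunk first
 * buddy has num_free_chunks() == 63 - 7 - 0 == 56 and therefore sits on
 * unbuddied[56]; once a last buddy is added it moves to the buddied list.
 */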
/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by zbud_alloc()
 *
 * In the case that the zbud page in which the allocation resides is under
 * reclaim, as indicated by the under_reclaim flag being set in the zbud
 * header, this function only sets the first|last_chunks to 0. The page is
 * actually freed once both buddies are evicted (see zbud_reclaim_page()
 * below).
 */
static void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* If first buddy, handle will be page aligned */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}
/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * zbud reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, zbud, has
 * information on how the allocations are organized within each zbud page.
 * This has the potential to create interesting locking situations between
 * zbud and the user, however.
 *
 * To avoid these, this is how zbud_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls zbud_reclaim_page().
 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
 * the user-defined eviction handler with the pool and handle as arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. zbud_reclaim_page() will add the zbud page back to the
 * appropriate list and try the next zbud page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the under_reclaim flag in the zbud header.
 *
 * If all buddies in the zbud page are successfully evicted, then the
 * zbud page can be freed. A sketch of a conforming eviction handler follows
 * this function.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * We need to encode the handles before unlocking, since we can
		 * race with free that will set (first|last)_chunks to 0
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free, free the zbud page and
			 * return success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
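/*
 * For illustration, a sketch of an eviction handler that conforms to the
 * contract documented above; write_back_somewhere() is a hypothetical helper
 * standing in for whatever the pool's user would actually do with the data:
 *
 *	static int my_evict(struct zbud_pool *pool, unsigned long handle)
 *	{
 *		void *src = zbud_map(pool, handle);
 *		int err = write_back_somewhere(src);	// hypothetical helper
 *
 *		zbud_unmap(pool, handle);
 *		if (err)
 *			return err;	// non-zero: handle was not evicted
 *		zbud_free(pool, handle);	// must precede returning 0
 *		return 0;
 *	}
 */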
/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * While trivial for zbud, the mapping functions for other allocators
 * implementing this allocation API could have more complex information encoded
 * in the handle and could create temporary mappings to make the data
 * accessible to the user.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}

/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}
/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool. The pool lock need not be
 * taken to access pages_nr.
 */
static u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}
/*****************
 * zpool
 ****************/

static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct zbud_ops zbud_zpool_ops = {
	.evict =	zbud_zpool_evict
};

static void *zbud_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct zbud_pool *pool;

	pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}

static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static int zbud_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = zbud_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}

static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_size(void *pool)
{
	return zbud_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.sleep_mapped =	true,
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.shrink =	zbud_zpool_shrink,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};
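/*
 * A sketch of how a client such as zswap might reach this driver through the
 * generic zpool layer; the pool name and sizes are invented for the example,
 * and the zpool signatures shown are assumed to match the era of this driver
 * (zpool_create_pool() still taking an ops argument):
 *
 *	unsigned long handle;
 *	unsigned int reclaimed;
 *	struct zpool *zp = zpool_create_pool("zbud", "example", GFP_KERNEL,
 *					     NULL);
 *
 *	if (!zpool_malloc(zp, 512, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memset(dst, 0, 512);	// stand-in for compressed data
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_shrink(zp, 1, &reclaimed);	// evict up to one page
 *	zpool_destroy_pool(zp);
 */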
MODULE_ALIAS("zpool-zbud");

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);

	pr_info("loaded\n");

	zpool_register_driver(&zbud_zpool_driver);

	return 0;
}

static void __exit exit_zbud(void)
{
	zpool_unregister_driver(&zbud_zpool_driver);
	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");