
// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <[email protected]>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/kmemleak.h>
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; accordingly, there
 * are 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
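
/*
 * Worked example (added commentary, not in the original source): with a
 * 4 KiB page (PAGE_SHIFT = 12) and NCHUNKS_ORDER = 6, CHUNK_SHIFT is 6, so
 * CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS is 64. The z3fold header rounds
 * up to one chunk (ZHDR_CHUNKS = 1) unless spinlock debugging grows it to
 * two, leaving NCHUNKS = 63 (or 62) payload chunks and as many freelists.
 */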
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles to this page held in other
 *			pages' slots structures
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
	PAGE_MIGRATED, /* page is migrated and soon to be released */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
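
/*
 * Note (added commentary, not in the original source): handle_to_slots()
 * relies on SLOTS_ALIGN (0x40) matching the alignment passed to
 * kmem_cache_create() in z3fold_create_pool(). Because every
 * z3fold_buddy_slots object is 64-byte aligned, a handle, which is the
 * address of one slot inside such an object, can simply be masked with
 * ~(SLOTS_ALIGN - 1) to recover the containing object.
 */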
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked) {
				struct page *page = virt_to_page(zhdr);

				if (!test_bit(PAGE_MIGRATED, &page->private))
					break;
				z3fold_page_unlock(zhdr);
			}
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	clear_bit(PAGE_MIGRATED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}
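
/*
 * Layout sketch (added commentary, not in the original source): the value
 * stored in a slot is the z3fold page address with the buddy index folded
 * into the low BUDDY_MASK bits and, for LAST buddies, the object size in
 * chunks shifted up by BUDDY_SHIFT. The handle handed out to users is the
 * address of the slot itself, so the object can later be relocated by
 * rewriting the slot without invalidating the user's handle.
 */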
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
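
/*
 * Worked example (added commentary, not in the original source): on a page
 * with TOTAL_CHUNKS = 64 and ZHDR_CHUNKS = 1, a lone middle object of 10
 * chunks starting at chunk 20 leaves 20 - 1 = 19 free chunks before it and
 * 64 - (20 + 10) = 34 after it, so num_free_chunks() reports 34: only the
 * larger gap is usable for a new buddy without moving the middle object.
 */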
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}
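
/*
 * Note (added commentary, not in the original source): get_free_buddy()
 * prefers the page edges. FIRST and LAST can be placed without knowing the
 * other objects' sizes, while MIDDLE is only chosen when both edges are
 * taken, since a middle object pins start_middle and constrains later
 * compaction. A HEADLESS return here means "no suitable slot", which
 * z3fold_alloc() treats as an error for non-headless pages.
 */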
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
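
/*
 * Worked example (added commentary, not in the original source): take
 * TOTAL_CHUNKS = 64, ZHDR_CHUNKS = 1, first_chunks = 10, last_chunks = 0
 * and a middle object of 8 chunks at start_middle = 30. The gap between the
 * first and middle objects is 30 - (10 + 1) = 19 chunks, which exceeds
 * BIG_CHUNK_GAP, so the middle object is moved down to chunk 11 and the
 * page's largest free run grows from 26 chunks (after middle) to 45.
 */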
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}
/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

static const struct movable_operations z3fold_mops;

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}

	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, &z3fold_mops);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
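
/*
 * Note (added commentary, not in the original source): an object bigger
 * than PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE cannot share its page
 * with a header plus another buddy, so it is stored HEADLESS: the whole
 * page is handed to the single object, the handle encodes the page address
 * directly, and such pages are neither compacted nor migrated.
 */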
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			/* See comment in __z3fold_alloc. */
			kref_get(&zhdr->refcount);
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;

			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			if (list_empty(&zhdr->buddy))
				add_to_unbuddied(pool, zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
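
/*
 * Sketch of an eviction handler (added illustration, not part of the
 * original file; my_evict and my_writeback are hypothetical). Per the
 * protocol documented above z3fold_reclaim_page(), the handler either
 * moves the object out and frees the handle, returning 0, or refuses with
 * a non-zero error so the page goes back on the LRU:
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *
 *		if (!my_writeback(obj)) {	// hypothetical backing store
 *			z3fold_unmap(pool, handle);
 *			return -EAGAIN;		// try another page
 *		}
 *		z3fold_unmap(pool, handle);
 *		z3fold_free(pool, handle);	// required on success
 *		return 0;
 *	}
 */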
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}
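
/*
 * Note (added commentary, not in the original source): mapping pins the
 * object only logically. mapped_count and MIDDLE_CHUNK_MAPPED stop the
 * compaction and migration paths from moving data while a user holds a
 * pointer into the page; the mapping itself is plain pointer arithmetic on
 * the page address (z3fold rejects __GFP_HIGHMEM), so there is no
 * kmap-style teardown to perform on unmap.
 */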
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	set_bit(PAGE_MIGRATED, &page->private);
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, &z3fold_mops);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	/* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. */
	page->private = 0;
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct movable_operations z3fold_mops = {
	.isolate_page = z3fold_page_isolate,
	.migrate_page = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
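
/*
 * Usage sketch (added illustration, not part of the original file; error
 * handling omitted, buf is a hypothetical source buffer): since z3fold
 * exports no API of its own, a client goes through the zpool layer, naming
 * the "z3fold" type registered above:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "testpool",
 *					     GFP_KERNEL, NULL);
 *	unsigned long handle;
 *
 *	zpool_malloc(zp, 1000, GFP_KERNEL, &handle);
 *	memcpy(zpool_map_handle(zp, handle, ZPOOL_MM_WO), buf, 1000);
 *	zpool_unmap_handle(zp, handle);
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */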
static int __init init_z3fold(void)
{
	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that there is space remaining for the buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <[email protected]>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");