dm-zoned-reclaim.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"
struct dmz_reclaim {
	struct dmz_metadata	*metadata;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	int			dev_idx;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};
/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD			(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50
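
/*
 * These thresholds drive dmz_should_reclaim() and dmz_reclaim_work(): a busy
 * target is reclaimed only once the percentage of unmapped zones drops to
 * DMZ_RECLAIM_LOW_UNMAP_ZONES, and never at or above
 * DMZ_RECLAIM_HIGH_UNMAP_ZONES, while an idle target is reclaimed whenever
 * reclaimable zones exist. Below half of the low threshold, zone copies run
 * at full speed.
 */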
/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zone->dev;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
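
/*
 * dmz_reclaim_align_wp() is called by dmz_reclaim_copy() before each copy
 * into a sequential destination zone so that the zone write pointer always
 * matches the chunk block about to be written.
 */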
/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
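
/*
 * The DMZ_RECLAIM_KCOPY flag bit serializes reclaim with kcopyd:
 * dmz_reclaim_copy() sets the bit before calling dm_kcopyd_copy() and sleeps
 * in wait_on_bit_io() until this completion callback clears it and wakes the
 * waiter, with the copy result recorded in zrc->kc_err.
 */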
/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		flags |= BIT(DM_KCOPYD_WRITE_SEQ);

	while (block < end_block) {
		if (src_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;
		if (dst_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		if (dmz_reclaim_should_terminate(src_zone))
			return -EINTR;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any hole between
		 * writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = src_zone->dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dst_zone->dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
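
/*
 * The helpers below implement the reclaim strategies selected by
 * dmz_do_reclaim(): flushing a buffer zone back into its data zone
 * (dmz_reclaim_buf), merging a sequential data zone into its buffer zone
 * (dmz_reclaim_seq_data), moving a cache or random data zone into a newly
 * allocated zone (dmz_reclaim_rnd_data), and unmapping an empty zone
 * (dmz_reclaim_empty).
 */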
/*
 * Move the valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		dzone->chunk, bzone->id, dmz_weight(bzone),
		dzone->id, dmz_weight(dzone));

	/* Copy the buffer zone's valid blocks into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		chunk, dzone->id, dmz_weight(dzone),
		bzone->id, dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Move valid blocks of the random (or cache) data zone dzone into a free
 * sequential zone. Once blocks are moved, remap the zone chunk to the new
 * zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;
	int alloc_flags = DMZ_ALLOC_SEQ;

	/* Get a free random or sequential zone */
	dmz_lock_map(zmd);
again:
	szone = dmz_alloc_zone(zmd, zrc->dev_idx,
			       alloc_flags | DMZ_ALLOC_RECLAIM);
	if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
		alloc_flags = DMZ_ALLOC_RND;
		goto again;
	}
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
		dmz_metadata_label(zmd), zrc->dev_idx, chunk,
		dmz_is_cache(dzone) ? "cache" : "rnd",
		dzone->id, dmz_weight(dzone),
		dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);

	/* Copy the data zone's valid blocks into the newly allocated zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the allocated zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
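
/*
 * Note: when no sequential zone can be allocated but cache zones are
 * configured, dmz_reclaim_rnd_data() retries the allocation with
 * DMZ_ALLOC_RND so that cache zones can still be reclaimed into a random
 * zone instead of failing with -ENOSPC.
 */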
/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}
/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
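
/*
 * zrc->atime is refreshed by dmz_reclaim_bio_acc(), so dmz_target_idle()
 * only returns true once no BIO has been accounted for DMZ_IDLE_PERIOD.
 * Idleness is passed to dmz_get_zone_for_reclaim() when picking a victim
 * zone and lets dmz_reclaim_work() run kcopyd at full speed.
 */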
/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
					 dmz_target_idle(zrc));
	if (!dzone) {
		DMDEBUG("(%s/%u): No zone found to reclaim",
			dmz_metadata_label(zmd), zrc->dev_idx);
		return -EBUSY;
	}
	rzone = dzone;

	start = jiffies;
	if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
		}
	}
out:
	if (ret) {
		if (ret == -EINTR)
			DMDEBUG("(%s/%u): reclaim zone %u interrupted",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id);
		else
			DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id, ret);
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
			dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
		return ret;
	}

	DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
		dmz_metadata_label(zmd), zrc->dev_idx,
		rzone->id, jiffies_to_msecs(jiffies - start));
	return 0;
}
static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_cache = dmz_nr_cache_zones(zmd);
	unsigned int nr_unmap, nr_zones;

	if (nr_cache) {
		nr_zones = nr_cache;
		nr_unmap = dmz_nr_unmap_cache_zones(zmd);
	} else {
		nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
		nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
	}
	if (nr_unmap <= 1)
		return 0;
	return nr_unmap * 100 / nr_zones;
}
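
/*
 * Reporting 0% when at most one unmapped zone is left makes reclaim treat
 * the situation as maximally urgent: dmz_should_reclaim() starts reclaim
 * even on a busy target and dmz_reclaim_work() runs kcopyd at full speed.
 */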
/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
	unsigned int nr_reclaim;

	nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);

	if (dmz_nr_cache_zones(zrc->metadata)) {
		/*
		 * The first device in a multi-device
		 * setup only contains cache zones, so
		 * never start reclaim there.
		 */
		if (zrc->dev_idx == 0)
			return false;
		nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
	}

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_reclaim)
		return true;

	/* If there are still plenty of cache zones, do not reclaim */
	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
		return false;

	/*
	 * If the percentage of unmapped cache zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int p_unmap;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return;

	p_unmap = dmz_reclaim_percentage(zrc);
	if (!dmz_should_reclaim(zrc, p_unmap)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on free random zones,
	 * and slower if some random zones are still free, to limit the
	 * impact on the user workload as much as possible.
	 */
	if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
	}

	DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap, dmz_nr_unmap_cache_zones(zmd),
		dmz_nr_cache_zones(zmd),
		dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx),
		dmz_nr_rnd_zones(zmd, zrc->dev_idx));

	ret = dmz_do_reclaim(zrc);
	if (ret && ret != -EINTR) {
		if (!dmz_check_dev(zmd))
			return;
	}

	dmz_schedule_reclaim(zrc);
}
/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim, int idx)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->metadata = zmd;
	zrc->atime = jiffies;
	zrc->dev_idx = idx;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,
					  dmz_metadata_label(zmd), idx);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}
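
/*
 * The functions below implement the rest of the reclaim life cycle:
 * dmz_dtr_reclaim() tears down what dmz_ctr_reclaim() set up,
 * dmz_suspend_reclaim() and dmz_resume_reclaim() stop and restart the
 * delayed work, and dmz_reclaim_bio_acc() and dmz_schedule_reclaim() let
 * the dm-zoned target account BIO activity and kick reclaim when needed.
 */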
/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	unsigned int p_unmap = dmz_reclaim_percentage(zrc);

	if (dmz_should_reclaim(zrc, p_unmap))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}