  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
  4. */
  5. #include <linux/mm.h>
  6. #include <linux/err.h>
  7. #include <linux/slab.h>
  8. #include <linux/rwsem.h>
  9. #include <linux/bitops.h>
  10. #include <linux/bitmap.h>
  11. #include <linux/device-mapper.h>
  12. #include "persistent-data/dm-bitset.h"
  13. #include "persistent-data/dm-space-map.h"
  14. #include "persistent-data/dm-block-manager.h"
  15. #include "persistent-data/dm-transaction-manager.h"
  16. #include "dm-clone-metadata.h"
  17. #define DM_MSG_PREFIX "clone metadata"
  18. #define SUPERBLOCK_LOCATION 0
  19. #define SUPERBLOCK_MAGIC 0x8af27f64
  20. #define SUPERBLOCK_CSUM_XOR 257649492
  21. #define DM_CLONE_MAX_CONCURRENT_LOCKS 5
  22. #define UUID_LEN 16
  23. /* Min and max dm-clone metadata versions supported */
  24. #define DM_CLONE_MIN_METADATA_VERSION 1
  25. #define DM_CLONE_MAX_METADATA_VERSION 1
/*
 * On-disk metadata layout.
 *
 * All multi-byte fields are little-endian.  The checksum covers everything
 * from 'flags' to the end of the metadata block (see sb_prepare_for_write()
 * and sb_check()); 'csum' itself is excluded from the checksummed range.
 */
struct superblock_disk {
	__le32 csum;		/* Checksum of the rest of the superblock */
	__le32 flags;		/* Written as 0 by __prepare_superblock() */
	__le64 blocknr;		/* This block's location, for misplaced-write detection */

	__u8 uuid[UUID_LEN];	/* FIXME: currently unused, zero-filled */
	__le64 magic;		/* SUPERBLOCK_MAGIC */
	__le32 version;		/* Metadata format version */

	/* Copy of the metadata space map root, taken at commit time */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	__le64 region_size;	/* Region size in sectors */
	__le64 target_size;	/* Device size in sectors */

	__le64 bitset_root;	/* Root of the on-disk hydration bitset */
} __packed;
  41. /*
  42. * Region and Dirty bitmaps.
  43. *
  44. * dm-clone logically splits the source and destination devices in regions of
  45. * fixed size. The destination device's regions are gradually hydrated, i.e.,
  46. * we copy (clone) the source's regions to the destination device. Eventually,
  47. * all regions will get hydrated and all I/O will be served from the
  48. * destination device.
  49. *
  50. * We maintain an on-disk bitmap which tracks the state of each of the
  51. * destination device's regions, i.e., whether they are hydrated or not.
  52. *
  53. * To save constantly doing look ups on disk we keep an in core copy of the
  54. * on-disk bitmap, the region_map.
  55. *
  56. * In order to track which regions are hydrated during a metadata transaction,
  57. * we use a second set of bitmaps, the dmap (dirty bitmap), which includes two
  58. * bitmaps, namely dirty_regions and dirty_words. The dirty_regions bitmap
  59. * tracks the regions that got hydrated during the current metadata
  60. * transaction. The dirty_words bitmap tracks the dirty words, i.e. longs, of
  61. * the dirty_regions bitmap.
  62. *
  63. * This allows us to precisely track the regions that were hydrated during the
  64. * current metadata transaction and update the metadata accordingly, when we
  65. * commit the current transaction. This is important because dm-clone should
  66. * only commit the metadata of regions that were properly flushed to the
  67. * destination device beforehand. Otherwise, in case of a crash, we could end
  68. * up with a corrupted dm-clone device.
  69. *
  70. * When a region finishes hydrating dm-clone calls
  71. * dm_clone_set_region_hydrated(), or for discard requests
  72. * dm_clone_cond_set_range(), which sets the corresponding bits in region_map
  73. * and dmap.
  74. *
  75. * During a metadata commit we scan dmap->dirty_words and dmap->dirty_regions
  76. * and update the on-disk metadata accordingly. Thus, we don't have to flush to
  77. * disk the whole region_map. We can just flush the dirty region_map bits.
  78. *
  79. * We use the helper dmap->dirty_words bitmap, which is smaller than the
  80. * original region_map, to reduce the amount of memory accesses during a
  81. * metadata commit. Moreover, as dm-bitset also accesses the on-disk bitmap in
  82. * 64-bit word granularity, the dirty_words bitmap helps us avoid useless disk
  83. * accesses.
  84. *
  85. * We could update directly the on-disk bitmap, when dm-clone calls either
  86. * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), but this
  87. * inserts significant metadata I/O overhead in dm-clone's I/O path. Also, as
  88. * these two functions don't block, we can call them in interrupt context,
  89. * e.g., in a hooked overwrite bio's completion routine, and further reduce the
  90. * I/O completion latency.
  91. *
  92. * We maintain two dirty bitmap sets. During a metadata commit we atomically
  93. * swap the currently used dmap with the unused one. This allows the metadata
  94. * update functions to run concurrently with an ongoing commit.
  95. */
/*
 * One of the two dirty-bitmap sets described above: tracks which regions
 * were hydrated during the current metadata transaction.
 */
struct dirty_map {
	unsigned long *dirty_words;	/* One bit per word of dirty_regions */
	unsigned long *dirty_regions;	/* One bit per region hydrated this transaction */
	unsigned int changed;		/* Non-zero once any bit has been set; cleared after flush */
};
/* In-core handle for one dm-clone device's metadata. */
struct dm_clone_metadata {
	/* The metadata block device */
	struct block_device *bdev;

	sector_t target_size;		/* Device size in sectors */
	sector_t region_size;		/* Region size in sectors */
	unsigned long nr_regions;	/* Number of regions in the device */
	unsigned long nr_words;		/* Longs needed for an nr_regions bitmap */

	/* Spinlock protecting the region and dirty bitmaps. */
	spinlock_t bitmap_lock;
	struct dirty_map dmap[2];	/* Two sets, swapped at commit time */
	struct dirty_map *current_dmap;

	/* Protected by lock */
	struct dirty_map *committing_dmap;

	/*
	 * In core copy of the on-disk bitmap to save constantly doing look ups
	 * on disk.
	 */
	unsigned long *region_map;

	/* Protected by bitmap_lock */
	unsigned int read_only;

	struct dm_block_manager *bm;
	struct dm_space_map *sm;
	struct dm_transaction_manager *tm;

	/* Serializes access to the persistent-data structures and fail_io */
	struct rw_semaphore lock;

	struct dm_disk_bitset bitset_info;
	dm_block_t bitset_root;

	/*
	 * Reading the space map root can fail, so we read it into this
	 * buffer before the superblock is locked and updated.
	 */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	bool hydration_done:1;	/* Set once every region is hydrated */
	bool fail_io:1;		/* Set when the metadata became unusable */
};
  135. /*---------------------------------------------------------------------------*/
  136. /*
  137. * Superblock validation.
  138. */
  139. static void sb_prepare_for_write(struct dm_block_validator *v,
  140. struct dm_block *b, size_t sb_block_size)
  141. {
  142. struct superblock_disk *sb;
  143. u32 csum;
  144. sb = dm_block_data(b);
  145. sb->blocknr = cpu_to_le64(dm_block_location(b));
  146. csum = dm_bm_checksum(&sb->flags, sb_block_size - sizeof(__le32),
  147. SUPERBLOCK_CSUM_XOR);
  148. sb->csum = cpu_to_le32(csum);
  149. }
/*
 * Validate the superblock after it is read: verify the recorded block
 * location, the magic number, the checksum, and that the metadata version is
 * within the supported range.
 */
static int sb_check(struct dm_block_validator *v, struct dm_block *b,
		    size_t sb_block_size)
{
	struct superblock_disk *sb;
	u32 csum, metadata_version;

	sb = dm_block_data(b);

	/* A mismatched blocknr means this block was written to the wrong place. */
	if (dm_block_location(b) != le64_to_cpu(sb->blocknr)) {
		DMERR("Superblock check failed: blocknr %llu, expected %llu",
		      le64_to_cpu(sb->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(sb->magic) != SUPERBLOCK_MAGIC) {
		DMERR("Superblock check failed: magic %llu, expected %llu",
		      le64_to_cpu(sb->magic),
		      (unsigned long long)SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	/* Recompute over the same range used by sb_prepare_for_write(). */
	csum = dm_bm_checksum(&sb->flags, sb_block_size - sizeof(__le32),
			      SUPERBLOCK_CSUM_XOR);
	if (sb->csum != cpu_to_le32(csum)) {
		DMERR("Superblock check failed: checksum %u, expected %u",
		      csum, le32_to_cpu(sb->csum));
		return -EILSEQ;
	}

	/* Check metadata version */
	metadata_version = le32_to_cpu(sb->version);
	if (metadata_version < DM_CLONE_MIN_METADATA_VERSION ||
	    metadata_version > DM_CLONE_MAX_METADATA_VERSION) {
		DMERR("Clone metadata version %u found, but only versions between %u and %u supported.",
		      metadata_version, DM_CLONE_MIN_METADATA_VERSION,
		      DM_CLONE_MAX_METADATA_VERSION);
		return -EINVAL;
	}

	return 0;
}
/* Validator passed to the block manager for all superblock reads/writes. */
static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
  191. /*
  192. * Check if the superblock is formatted or not. We consider the superblock to
  193. * be formatted in case we find non-zero bytes in it.
  194. */
  195. static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *formatted)
  196. {
  197. int r;
  198. unsigned int i, nr_words;
  199. struct dm_block *sblock;
  200. __le64 *data_le, zero = cpu_to_le64(0);
  201. /*
  202. * We don't use a validator here because the superblock could be all
  203. * zeroes.
  204. */
  205. r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &sblock);
  206. if (r) {
  207. DMERR("Failed to read_lock superblock");
  208. return r;
  209. }
  210. data_le = dm_block_data(sblock);
  211. *formatted = false;
  212. /* This assumes that the block size is a multiple of 8 bytes */
  213. BUG_ON(dm_bm_block_size(bm) % sizeof(__le64));
  214. nr_words = dm_bm_block_size(bm) / sizeof(__le64);
  215. for (i = 0; i < nr_words; i++) {
  216. if (data_le[i] != zero) {
  217. *formatted = true;
  218. break;
  219. }
  220. }
  221. dm_bm_unlock(sblock);
  222. return 0;
  223. }
  224. /*---------------------------------------------------------------------------*/
  225. /*
  226. * Low-level metadata handling.
  227. */
/* Read-lock the superblock, validating it with sb_validator. */
static inline int superblock_read_lock(struct dm_clone_metadata *cmd,
				       struct dm_block **sblock)
{
	return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}
/* Write-lock the superblock with its contents zeroed, ready for rewriting. */
static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd,
					     struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}
  238. static int __copy_sm_root(struct dm_clone_metadata *cmd)
  239. {
  240. int r;
  241. size_t root_size;
  242. r = dm_sm_root_size(cmd->sm, &root_size);
  243. if (r)
  244. return r;
  245. return dm_sm_copy_root(cmd->sm, &cmd->metadata_space_map_root, root_size);
  246. }
/*
 * Save dm-clone metadata in superblock.  'blocknr' and 'csum' are filled in
 * later by sb_prepare_for_write(), when the block is written out.
 */
static void __prepare_superblock(struct dm_clone_metadata *cmd,
				 struct superblock_disk *sb)
{
	sb->flags = cpu_to_le32(0UL);

	/* FIXME: UUID is currently unused */
	memset(sb->uuid, 0, sizeof(sb->uuid));

	sb->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
	sb->version = cpu_to_le32(DM_CLONE_MAX_METADATA_VERSION);

	/* Save the metadata space_map root */
	memcpy(&sb->metadata_space_map_root, &cmd->metadata_space_map_root,
	       sizeof(cmd->metadata_space_map_root));

	sb->region_size = cpu_to_le64(cmd->region_size);
	sb->target_size = cpu_to_le64(cmd->target_size);
	sb->bitset_root = cpu_to_le64(cmd->bitset_root);
}
/*
 * Open already-formatted metadata: verify the geometry recorded in the
 * superblock matches what the caller expects, then open the transaction
 * manager/space map and read the on-disk bitset root.
 */
static int __open_metadata(struct dm_clone_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *sb;

	r = superblock_read_lock(cmd, &sblock);
	if (r) {
		DMERR("Failed to read_lock superblock");
		return r;
	}

	sb = dm_block_data(sblock);

	/* Verify that target_size and region_size haven't changed. */
	if (cmd->region_size != le64_to_cpu(sb->region_size) ||
	    cmd->target_size != le64_to_cpu(sb->target_size)) {
		DMERR("Region and/or target size don't match the ones in metadata");
		r = -EINVAL;
		goto out_with_lock;
	}

	r = dm_tm_open_with_sm(cmd->bm, SUPERBLOCK_LOCATION,
			       sb->metadata_space_map_root,
			       sizeof(sb->metadata_space_map_root),
			       &cmd->tm, &cmd->sm);
	if (r) {
		DMERR("dm_tm_open_with_sm failed");
		goto out_with_lock;
	}

	dm_disk_bitset_init(cmd->tm, &cmd->bitset_info);
	cmd->bitset_root = le64_to_cpu(sb->bitset_root);

out_with_lock:
	dm_bm_unlock(sblock);

	return r;
}
/*
 * Format fresh metadata: create the transaction manager and space map, create
 * and size the on-disk bitset (one bit per region, all clear), and commit an
 * initial superblock.  On any failure the partially-created tm/sm pair is
 * torn down.
 */
static int __format_metadata(struct dm_clone_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *sb;

	r = dm_tm_create_with_sm(cmd->bm, SUPERBLOCK_LOCATION, &cmd->tm, &cmd->sm);
	if (r) {
		DMERR("Failed to create transaction manager");
		return r;
	}

	dm_disk_bitset_init(cmd->tm, &cmd->bitset_info);

	r = dm_bitset_empty(&cmd->bitset_info, &cmd->bitset_root);
	if (r) {
		DMERR("Failed to create empty on-disk bitset");
		goto err_with_tm;
	}

	/* Grow the bitset to nr_regions entries, initialized to false (unhydrated). */
	r = dm_bitset_resize(&cmd->bitset_info, cmd->bitset_root, 0,
			     cmd->nr_regions, false, &cmd->bitset_root);
	if (r) {
		DMERR("Failed to resize on-disk bitset to %lu entries", cmd->nr_regions);
		goto err_with_tm;
	}

	/* Flush to disk all blocks, except the superblock */
	r = dm_tm_pre_commit(cmd->tm);
	if (r) {
		DMERR("dm_tm_pre_commit failed");
		goto err_with_tm;
	}

	/* Snapshot the space map root before locking the superblock. */
	r = __copy_sm_root(cmd);
	if (r) {
		DMERR("__copy_sm_root failed");
		goto err_with_tm;
	}

	r = superblock_write_lock_zero(cmd, &sblock);
	if (r) {
		DMERR("Failed to write_lock superblock");
		goto err_with_tm;
	}

	sb = dm_block_data(sblock);
	__prepare_superblock(cmd, sb);

	/* dm_tm_commit() also unlocks the superblock. */
	r = dm_tm_commit(cmd->tm, sblock);
	if (r) {
		DMERR("Failed to commit superblock");
		goto err_with_tm;
	}

	return 0;

err_with_tm:
	dm_sm_destroy(cmd->sm);
	dm_tm_destroy(cmd->tm);

	return r;
}
  346. static int __open_or_format_metadata(struct dm_clone_metadata *cmd, bool may_format_device)
  347. {
  348. int r;
  349. bool formatted = false;
  350. r = __superblock_all_zeroes(cmd->bm, &formatted);
  351. if (r)
  352. return r;
  353. if (!formatted)
  354. return may_format_device ? __format_metadata(cmd) : -EPERM;
  355. return __open_metadata(cmd);
  356. }
/*
 * Create the block manager, then open or format the metadata on top of it.
 * The block manager is destroyed again if opening/formatting fails.
 */
static int __create_persistent_data_structures(struct dm_clone_metadata *cmd,
					       bool may_format_device)
{
	int r;

	/* Create block manager */
	cmd->bm = dm_block_manager_create(cmd->bdev,
					  DM_CLONE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  DM_CLONE_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(cmd->bm)) {
		DMERR("Failed to create block manager");
		return PTR_ERR(cmd->bm);
	}

	r = __open_or_format_metadata(cmd, may_format_device);
	if (r)
		dm_block_manager_destroy(cmd->bm);

	return r;
}
/*
 * Teardown counterpart of __create_persistent_data_structures(): destroy the
 * space map, then the transaction manager, then the block manager.
 */
static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd)
{
	dm_sm_destroy(cmd->sm);
	dm_tm_destroy(cmd->tm);
	dm_block_manager_destroy(cmd->bm);
}
  380. /*---------------------------------------------------------------------------*/
/* Bytes needed for a bitmap of nr_bits bits, rounded up to whole longs. */
static size_t bitmap_size(unsigned long nr_bits)
{
	return BITS_TO_LONGS(nr_bits) * sizeof(long);
}
  385. static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
  386. unsigned long nr_regions)
  387. {
  388. dmap->changed = 0;
  389. dmap->dirty_words = kvzalloc(bitmap_size(nr_words), GFP_KERNEL);
  390. if (!dmap->dirty_words)
  391. return -ENOMEM;
  392. dmap->dirty_regions = kvzalloc(bitmap_size(nr_regions), GFP_KERNEL);
  393. if (!dmap->dirty_regions) {
  394. kvfree(dmap->dirty_words);
  395. return -ENOMEM;
  396. }
  397. return 0;
  398. }
/* Free one dirty-map set (counterpart of __dirty_map_init()). */
static void __dirty_map_exit(struct dirty_map *dmap)
{
	kvfree(dmap->dirty_words);
	kvfree(dmap->dirty_regions);
}
  404. static int dirty_map_init(struct dm_clone_metadata *cmd)
  405. {
  406. if (__dirty_map_init(&cmd->dmap[0], cmd->nr_words, cmd->nr_regions)) {
  407. DMERR("Failed to allocate dirty bitmap");
  408. return -ENOMEM;
  409. }
  410. if (__dirty_map_init(&cmd->dmap[1], cmd->nr_words, cmd->nr_regions)) {
  411. DMERR("Failed to allocate dirty bitmap");
  412. __dirty_map_exit(&cmd->dmap[0]);
  413. return -ENOMEM;
  414. }
  415. cmd->current_dmap = &cmd->dmap[0];
  416. cmd->committing_dmap = NULL;
  417. return 0;
  418. }
/* Free both dirty-map sets. */
static void dirty_map_exit(struct dm_clone_metadata *cmd)
{
	__dirty_map_exit(&cmd->dmap[0]);
	__dirty_map_exit(&cmd->dmap[1]);
}
/*
 * Copy the on-disk bitset into the in-core region_map, walking it with a
 * dm-bitset cursor after flushing the bitset cache.
 */
static int __load_bitset_in_core(struct dm_clone_metadata *cmd)
{
	int r;
	unsigned long i;
	struct dm_bitset_cursor c;

	/* Flush bitset cache */
	r = dm_bitset_flush(&cmd->bitset_info, cmd->bitset_root, &cmd->bitset_root);
	if (r)
		return r;

	r = dm_bitset_cursor_begin(&cmd->bitset_info, cmd->bitset_root, cmd->nr_regions, &c);
	if (r)
		return r;

	for (i = 0; ; i++) {
		if (dm_bitset_cursor_get_value(&c))
			__set_bit(i, cmd->region_map);
		else
			__clear_bit(i, cmd->region_map);

		/*
		 * Check before advancing, so the cursor is never stepped past
		 * the last valid bit.
		 */
		if (i >= (cmd->nr_regions - 1))
			break;

		r = dm_bitset_cursor_next(&c);

		if (r)
			break;
	}

	dm_bitset_cursor_end(&c);

	return r;
}
/*
 * Open (formatting if necessary) the dm-clone metadata on @bdev and build the
 * in-core state: geometry, locks, the in-core region bitmap, and both dirty
 * maps.  Returns an ERR_PTR() on failure.
 */
struct dm_clone_metadata *dm_clone_metadata_open(struct block_device *bdev,
						 sector_t target_size,
						 sector_t region_size)
{
	int r;
	struct dm_clone_metadata *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		DMERR("Failed to allocate memory for dm-clone metadata");
		return ERR_PTR(-ENOMEM);
	}

	cmd->bdev = bdev;
	cmd->target_size = target_size;
	cmd->region_size = region_size;
	cmd->nr_regions = dm_sector_div_up(cmd->target_size, cmd->region_size);
	cmd->nr_words = BITS_TO_LONGS(cmd->nr_regions);

	init_rwsem(&cmd->lock);
	spin_lock_init(&cmd->bitmap_lock);
	cmd->read_only = 0;
	cmd->fail_io = false;
	cmd->hydration_done = false;

	/* No need to zero: __load_bitset_in_core() overwrites every bit. */
	cmd->region_map = kvmalloc(bitmap_size(cmd->nr_regions), GFP_KERNEL);
	if (!cmd->region_map) {
		DMERR("Failed to allocate memory for region bitmap");
		r = -ENOMEM;
		goto out_with_md;
	}

	r = __create_persistent_data_structures(cmd, true);
	if (r)
		goto out_with_region_map;

	r = __load_bitset_in_core(cmd);
	if (r) {
		DMERR("Failed to load on-disk region map");
		goto out_with_pds;
	}

	r = dirty_map_init(cmd);
	if (r)
		goto out_with_pds;

	/* A fully-set region map means every region is already hydrated. */
	if (bitmap_full(cmd->region_map, cmd->nr_regions))
		cmd->hydration_done = true;

	return cmd;

out_with_pds:
	__destroy_persistent_data_structures(cmd);

out_with_region_map:
	kvfree(cmd->region_map);

out_with_md:
	kfree(cmd);

	return ERR_PTR(r);
}
/*
 * Release everything allocated by dm_clone_metadata_open().  When fail_io is
 * set the persistent data structures were already torn down (see
 * dm_clone_metadata_abort()) and must not be touched again.
 */
void dm_clone_metadata_close(struct dm_clone_metadata *cmd)
{
	if (!cmd->fail_io)
		__destroy_persistent_data_structures(cmd);

	dirty_map_exit(cmd);
	kvfree(cmd->region_map);
	kfree(cmd);
}
/* True once every region of the destination device has been hydrated. */
bool dm_clone_is_hydration_done(struct dm_clone_metadata *cmd)
{
	return cmd->hydration_done;
}
/* True if @region_nr is hydrated (cheap fast path once hydration is done). */
bool dm_clone_is_region_hydrated(struct dm_clone_metadata *cmd, unsigned long region_nr)
{
	return dm_clone_is_hydration_done(cmd) || test_bit(region_nr, cmd->region_map);
}
/*
 * True if every region in [start, start + nr_regions) is hydrated, i.e. no
 * zero bit exists in that range of the in-core region map.
 */
bool dm_clone_is_range_hydrated(struct dm_clone_metadata *cmd,
				unsigned long start, unsigned long nr_regions)
{
	unsigned long bit;

	if (dm_clone_is_hydration_done(cmd))
		return true;

	bit = find_next_zero_bit(cmd->region_map, cmd->nr_regions, start);

	return (bit >= (start + nr_regions));
}
/* Number of hydrated regions, i.e. set bits in the in-core region map. */
unsigned int dm_clone_nr_of_hydrated_regions(struct dm_clone_metadata *cmd)
{
	return bitmap_weight(cmd->region_map, cmd->nr_regions);
}
/*
 * First unhydrated region at or after @start; returns cmd->nr_regions when
 * no such region exists.
 */
unsigned long dm_clone_find_next_unhydrated_region(struct dm_clone_metadata *cmd,
						   unsigned long start)
{
	return find_next_zero_bit(cmd->region_map, cmd->nr_regions, start);
}
  533. static int __update_metadata_word(struct dm_clone_metadata *cmd,
  534. unsigned long *dirty_regions,
  535. unsigned long word)
  536. {
  537. int r;
  538. unsigned long index = word * BITS_PER_LONG;
  539. unsigned long max_index = min(cmd->nr_regions, (word + 1) * BITS_PER_LONG);
  540. while (index < max_index) {
  541. if (test_bit(index, dirty_regions)) {
  542. r = dm_bitset_set_bit(&cmd->bitset_info, cmd->bitset_root,
  543. index, &cmd->bitset_root);
  544. if (r) {
  545. DMERR("dm_bitset_set_bit failed");
  546. return r;
  547. }
  548. __clear_bit(index, dirty_regions);
  549. }
  550. index++;
  551. }
  552. return 0;
  553. }
/*
 * Commit the current metadata transaction: flush the bitset cache, pre-commit
 * all non-superblock blocks, snapshot the space map root, then rewrite and
 * commit the superblock.  Also updates hydration_done if the region map has
 * become full.
 */
static int __metadata_commit(struct dm_clone_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct superblock_disk *sb;

	/* Flush bitset cache */
	r = dm_bitset_flush(&cmd->bitset_info, cmd->bitset_root, &cmd->bitset_root);
	if (r) {
		DMERR("dm_bitset_flush failed");
		return r;
	}

	/* Flush to disk all blocks, except the superblock */
	r = dm_tm_pre_commit(cmd->tm);
	if (r) {
		DMERR("dm_tm_pre_commit failed");
		return r;
	}

	/* Save the space map root in cmd->metadata_space_map_root */
	r = __copy_sm_root(cmd);
	if (r) {
		DMERR("__copy_sm_root failed");
		return r;
	}

	/* Lock the superblock */
	r = superblock_write_lock_zero(cmd, &sblock);
	if (r) {
		DMERR("Failed to write_lock superblock");
		return r;
	}

	/* Save the metadata in superblock */
	sb = dm_block_data(sblock);
	__prepare_superblock(cmd, sb);

	/* Unlock superblock and commit it to disk */
	r = dm_tm_commit(cmd->tm, sblock);
	if (r) {
		DMERR("Failed to commit superblock");
		return r;
	}

	/*
	 * FIXME: Find a more efficient way to check if the hydration is done.
	 */
	if (bitmap_full(cmd->region_map, cmd->nr_regions))
		cmd->hydration_done = true;

	return 0;
}
/*
 * Flush @dmap to the on-disk bitset and commit the metadata.  Only the dirty
 * words are scanned, each cleared once its regions have been pushed out;
 * dmap->changed is reset on success.
 */
static int __flush_dmap(struct dm_clone_metadata *cmd, struct dirty_map *dmap)
{
	int r;
	unsigned long word;

	word = 0;
	do {
		word = find_next_bit(dmap->dirty_words, cmd->nr_words, word);

		if (word == cmd->nr_words)
			break;

		r = __update_metadata_word(cmd, dmap->dirty_regions, word);

		if (r)
			return r;

		__clear_bit(word, dmap->dirty_words);
		word++;
	} while (word < cmd->nr_words);

	r = __metadata_commit(cmd);

	if (r)
		return r;

	/* Update the changed flag */
	spin_lock_irq(&cmd->bitmap_lock);
	dmap->changed = 0;
	spin_unlock_irq(&cmd->bitmap_lock);

	return 0;
}
/*
 * Phase one of the two-phase commit: atomically swap the current dirty map
 * with the clean spare and set the old one aside as 'committing'.  The actual
 * flush happens in dm_clone_metadata_commit().  Fails if the spare map is not
 * clean (i.e. the previous commit never finished).
 */
int dm_clone_metadata_pre_commit(struct dm_clone_metadata *cmd)
{
	int r = 0;
	struct dirty_map *dmap, *next_dmap;

	down_write(&cmd->lock);

	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
		r = -EPERM;
		goto out;
	}

	/* Get current dirty bitmap */
	dmap = cmd->current_dmap;

	/* Get next dirty bitmap */
	next_dmap = (dmap == &cmd->dmap[0]) ? &cmd->dmap[1] : &cmd->dmap[0];

	/*
	 * The last commit failed, so we don't have a clean dirty-bitmap to
	 * use.
	 */
	if (WARN_ON(next_dmap->changed || cmd->committing_dmap)) {
		r = -EINVAL;
		goto out;
	}

	/* Swap dirty bitmaps */
	spin_lock_irq(&cmd->bitmap_lock);
	cmd->current_dmap = next_dmap;
	spin_unlock_irq(&cmd->bitmap_lock);

	/* Set old dirty bitmap as currently committing */
	cmd->committing_dmap = dmap;
out:
	up_write(&cmd->lock);

	return r;
}
/*
 * Phase two of the two-phase commit: flush the dirty map set aside by
 * dm_clone_metadata_pre_commit().  The committing map is only cleared on
 * success; on failure it is left in place.
 */
int dm_clone_metadata_commit(struct dm_clone_metadata *cmd)
{
	int r = -EPERM;

	down_write(&cmd->lock);
	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
		goto out;

	if (WARN_ON(!cmd->committing_dmap)) {
		r = -EINVAL;
		goto out;
	}

	r = __flush_dmap(cmd, cmd->committing_dmap);
	if (!r) {
		/* Clear committing dmap */
		cmd->committing_dmap = NULL;
	}
out:
	up_write(&cmd->lock);

	return r;
}
/*
 * Mark @region_nr as hydrated in both the in-core region map and the current
 * dirty map.  Non-blocking; uses spin_lock_irqsave() because, per the comment
 * at the top of this file, it may be called from interrupt context (e.g. a
 * hooked bio completion routine).  Returns -ERANGE for an out-of-range region
 * and -EPERM in read-only mode.
 */
int dm_clone_set_region_hydrated(struct dm_clone_metadata *cmd, unsigned long region_nr)
{
	int r = 0;
	struct dirty_map *dmap;
	unsigned long word, flags;

	if (unlikely(region_nr >= cmd->nr_regions)) {
		DMERR("Region %lu out of range (total number of regions %lu)",
		      region_nr, cmd->nr_regions);
		return -ERANGE;
	}

	word = region_nr / BITS_PER_LONG;

	spin_lock_irqsave(&cmd->bitmap_lock, flags);

	if (cmd->read_only) {
		r = -EPERM;
		goto out;
	}

	dmap = cmd->current_dmap;

	__set_bit(word, dmap->dirty_words);
	__set_bit(region_nr, dmap->dirty_regions);
	__set_bit(region_nr, cmd->region_map);
	dmap->changed = 1;

out:
	spin_unlock_irqrestore(&cmd->bitmap_lock, flags);

	return r;
}
/*
 * Mark regions [start, start + nr_regions) as hydrated, skipping regions that
 * are already set in the in-core region map.  The range check also guards
 * against (start + nr_regions) overflowing.  Returns -ERANGE for an invalid
 * range and -EPERM in read-only mode.
 */
int dm_clone_cond_set_range(struct dm_clone_metadata *cmd, unsigned long start,
			    unsigned long nr_regions)
{
	int r = 0;
	struct dirty_map *dmap;
	unsigned long word, region_nr;

	if (unlikely(start >= cmd->nr_regions || (start + nr_regions) < start ||
		     (start + nr_regions) > cmd->nr_regions)) {
		DMERR("Invalid region range: start %lu, nr_regions %lu (total number of regions %lu)",
		      start, nr_regions, cmd->nr_regions);
		return -ERANGE;
	}

	spin_lock_irq(&cmd->bitmap_lock);

	if (cmd->read_only) {
		r = -EPERM;
		goto out;
	}

	dmap = cmd->current_dmap;
	for (region_nr = start; region_nr < (start + nr_regions); region_nr++) {
		if (!test_bit(region_nr, cmd->region_map)) {
			word = region_nr / BITS_PER_LONG;
			__set_bit(word, dmap->dirty_words);
			__set_bit(region_nr, dmap->dirty_regions);
			__set_bit(region_nr, cmd->region_map);
			dmap->changed = 1;
		}
	}

out:
	spin_unlock_irq(&cmd->bitmap_lock);

	return r;
}
  729. /*
  730. * WARNING: This must not be called concurrently with either
  731. * dm_clone_set_region_hydrated() or dm_clone_cond_set_range(), as it changes
  732. * cmd->region_map without taking the cmd->bitmap_lock spinlock. The only
  733. * exception is after setting the metadata to read-only mode, using
  734. * dm_clone_metadata_set_read_only().
  735. *
  736. * We don't take the spinlock because __load_bitset_in_core() does I/O, so it
  737. * may block.
  738. */
  739. int dm_clone_reload_in_core_bitset(struct dm_clone_metadata *cmd)
  740. {
  741. int r = -EINVAL;
  742. down_write(&cmd->lock);
  743. if (cmd->fail_io)
  744. goto out;
  745. r = __load_bitset_in_core(cmd);
  746. out:
  747. up_write(&cmd->lock);
  748. return r;
  749. }
  750. bool dm_clone_changed_this_transaction(struct dm_clone_metadata *cmd)
  751. {
  752. bool r;
  753. unsigned long flags;
  754. spin_lock_irqsave(&cmd->bitmap_lock, flags);
  755. r = cmd->dmap[0].changed || cmd->dmap[1].changed;
  756. spin_unlock_irqrestore(&cmd->bitmap_lock, flags);
  757. return r;
  758. }
/*
 * Abort the current metadata transaction by destroying and re-opening the
 * persistent data structures, reverting to the last committed state.  If the
 * reopen fails the metadata can be neither read nor written, so fail_io is
 * set (and dm_clone_metadata_close() will skip the teardown).
 */
int dm_clone_metadata_abort(struct dm_clone_metadata *cmd)
{
	int r = -EPERM;

	down_write(&cmd->lock);

	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm))
		goto out;

	__destroy_persistent_data_structures(cmd);

	/* may_format_device = false: only re-open what is already on disk. */
	r = __create_persistent_data_structures(cmd, false);
	if (r) {
		/* If something went wrong we can neither write nor read the metadata */
		cmd->fail_io = true;
	}
out:
	up_write(&cmd->lock);

	return r;
}
/*
 * Switch the metadata to read-only mode: read_only (checked under
 * bitmap_lock) makes the bitmap update functions fail with -EPERM, and the
 * block manager is set read-only too, unless I/O has already failed.
 */
void dm_clone_metadata_set_read_only(struct dm_clone_metadata *cmd)
{
	down_write(&cmd->lock);

	spin_lock_irq(&cmd->bitmap_lock);
	cmd->read_only = 1;
	spin_unlock_irq(&cmd->bitmap_lock);

	if (!cmd->fail_io)
		dm_bm_set_read_only(cmd->bm);

	up_write(&cmd->lock);
}
/*
 * Switch the metadata back to read-write mode (inverse of
 * dm_clone_metadata_set_read_only()).
 */
void dm_clone_metadata_set_read_write(struct dm_clone_metadata *cmd)
{
	down_write(&cmd->lock);

	spin_lock_irq(&cmd->bitmap_lock);
	cmd->read_only = 0;
	spin_unlock_irq(&cmd->bitmap_lock);

	if (!cmd->fail_io)
		dm_bm_set_read_write(cmd->bm);

	up_write(&cmd->lock);
}
  795. int dm_clone_get_free_metadata_block_count(struct dm_clone_metadata *cmd,
  796. dm_block_t *result)
  797. {
  798. int r = -EINVAL;
  799. down_read(&cmd->lock);
  800. if (!cmd->fail_io)
  801. r = dm_sm_get_nr_free(cmd->sm, result);
  802. up_read(&cmd->lock);
  803. return r;
  804. }
  805. int dm_clone_get_metadata_dev_size(struct dm_clone_metadata *cmd,
  806. dm_block_t *result)
  807. {
  808. int r = -EINVAL;
  809. down_read(&cmd->lock);
  810. if (!cmd->fail_io)
  811. r = dm_sm_get_nr_blocks(cmd->sm, result);
  812. up_read(&cmd->lock);
  813. return r;
  814. }