/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>

/*--------------------------------------------------------------------------
 * As far as the metadata goes, there is:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the top
 *   40 bits.
 *
 * BTrees consist solely of btree_nodes that each fill a block.  Some are
 * internal nodes, as such their values are a __le64 pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys, so they're all held together to help the
 * cpu cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and holds some details such as how many free entries
 *   there are.
 *
 * - The bitmap blocks have a header (for the checksum).  The rest of
 *   the block is pairs of bits, with the meaning being:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has a single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k the
 * metadata can support data devices of hundreds of terabytes.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try to avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/

#define DM_MSG_PREFIX   "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 2
#define SECTOR_TO_BLOCK_SHIFT 3

/*
 * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
 * For btree remove:
 *  2 for shadow spine +
 *  4 for rebalancing 3 child nodes
 * The remove case dominates, so at most 2 + 4 = 6 blocks are locked at once.
 */
#define THIN_MAX_CONCURRENT_LOCKS 6

/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128

/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];

	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;

struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;		/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;

struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;

	/*
	 * Pre-commit callback.
	 *
	 * This allows the thin provisioning target to run a callback before
	 * the metadata are committed.
	 */
	dm_pool_pre_commit_fn pre_commit_fn;
	void *pre_commit_context;

	/*
	 * We reserve a section of the metadata for commit overhead.
	 * All reported space does *not* include this.
	 */
	dm_block_t metadata_reserve;

	/*
	 * Set if a transaction has to be aborted but the attempt to roll back
	 * to the previous (good) transaction failed.  The only pool metadata
	 * operation possible in this state is the closing of the device.
	 */
	bool fail_io:1;

	/*
	 * Set once a thin-pool has been accessed through one of the interfaces
	 * that imply the pool is in-service (e.g. thin devices created/deleted,
	 * thin-pool message, metadata snapshots, etc).
	 */
	bool in_service:1;

	/*
	 * Reading the space map roots can fail, so we read it into these
	 * buffers before the superblock is locked and updated.
	 */
	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	bool changed:1;
	bool aborted_with_changes:1;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};

/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/
#define SUPERBLOCK_CSUM_XOR 160774

static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}

static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: wanted %llu",
		      le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: wanted %llu",
		      le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};

/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
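
/*
 * A worked example of the packing above (illustrative, not used by the
 * code): pack_block_time(0x12345, 7) == (0x12345ULL << 24) | 7
 * == 0x12345000007, and unpack_block_time() recovers b == 0x12345 and
 * t == 7.  The time must fit in 24 bits and the block in 40 bits for
 * the round trip to hold.
 */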
/*
 * It's more efficient to call dm_sm_{inc,dec}_blocks as few times as
 * possible.  'with_runs' reads contiguous runs of blocks, and calls the
 * given sm function.
 */
typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);

static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
{
	uint64_t b, begin, end;
	uint32_t t;
	bool in_run = false;
	unsigned int i;

	for (i = 0; i < count; i++, value_le++) {
		/* We know value_le is 8 byte aligned */
		unpack_block_time(le64_to_cpu(*value_le), &b, &t);

		if (in_run) {
			if (b == end) {
				end++;
			} else {
				fn(sm, begin, end);
				begin = b;
				end = b + 1;
			}
		} else {
			in_run = true;
			begin = b;
			end = b + 1;
		}
	}

	if (in_run)
		fn(sm, begin, end);
}
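
/*
 * For example, values unpacking to blocks {100, 101, 102, 200} result in
 * two calls: fn(sm, 100, 103) and fn(sm, 200, 201) - the end of each run
 * is exclusive.
 */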
static void data_block_inc(void *context, const void *value_le, unsigned int count)
{
	with_runs((struct dm_space_map *) context,
		  (const __le64 *) value_le, count, dm_sm_inc_blocks);
}

static void data_block_dec(void *context, const void *value_le, unsigned int count)
{
	with_runs((struct dm_space_map *) context,
		  (const __le64 *) value_le, count, dm_sm_dec_blocks);
}

static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;
	uint64_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}

static void subtree_inc(void *context, const void *value, unsigned int count)
{
	struct dm_btree_info *info = context;
	const __le64 *root_le = value;
	unsigned int i;

	for (i = 0; i < count; i++, root_le++)
		dm_tm_inc(info->tm, le64_to_cpu(*root_le));
}

static void subtree_dec(void *context, const void *value, unsigned int count)
{
	struct dm_btree_info *info = context;
	const __le64 *root_le = value;
	unsigned int i;

	for (i = 0; i < count; i++, root_le++)
		if (dm_btree_del(info, le64_to_cpu(*root_le)))
			DMERR("btree delete failed");
}

static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}

/*----------------------------------------------------------------*/

/*
 * Variant that is used for in-core only changes or code that
 * shouldn't put the pool in service on its own (e.g. commit).
 */
static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd)
	__acquires(pmd->root_lock)
{
	down_write(&pmd->root_lock);
}

static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	if (unlikely(!pmd->in_service))
		pmd->in_service = true;
}

static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
	__releases(pmd->root_lock)
{
	up_write(&pmd->root_lock);
}

/*----------------------------------------------------------------*/

static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}

static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned int i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	dm_bm_unlock(b);

	return 0;
}

static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->bl_info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}
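
/*
 * Summary of the btree infos set up above: 'info' is the full two-level
 * (dev id, virtual block) -> block_time tree; 'nb_info' is the same tree
 * accessed via the non-blocking tm; 'tl_info' and 'bl_info' view just the
 * top and bottom levels of it; 'details_info' is the dev id ->
 * disk_device_details tree.
 */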
static int save_sm_roots(struct dm_pool_metadata *pmd)
{
	int r;
	size_t len;

	r = dm_sm_root_size(pmd->metadata_sm, &len);
	if (r < 0)
		return r;

	r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &len);
	if (r < 0)
		return r;

	return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len);
}

static void copy_sm_roots(struct dm_pool_metadata *pmd,
			  struct thin_disk_superblock *disk)
{
	memcpy(&disk->metadata_space_map_root,
	       &pmd->metadata_space_map_root,
	       sizeof(pmd->metadata_space_map_root));

	memcpy(&disk->data_space_map_root,
	       &pmd->data_space_map_root,
	       sizeof(pmd->data_space_map_root));
}

static int __write_initial_superblock(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;
	sector_t bdev_size = bdev_nr_sectors(pmd->bdev);

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock_zero(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->trans_id = 0;
	disk_super->held_root = 0;

	copy_sm_roots(pmd, disk_super);

	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);

	return dm_tm_commit(pmd->tm, sblock);
}

static int __format_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				 &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed");
		return r;
	}

	pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_create failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}

	__setup_btree_details(pmd);

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad_cleanup_nb_tm;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad_cleanup_nb_tm;
	}

	r = __write_initial_superblock(pmd);
	if (r)
		goto bad_cleanup_nb_tm;

	return 0;

bad_cleanup_nb_tm:
	dm_tm_destroy(pmd->nb_tm);
bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);

	return r;
}

static int __check_incompat_features(struct thin_disk_superblock *disk_super,
				     struct dm_pool_metadata *pmd)
{
	uint32_t features;

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (bdev_read_only(pmd->bdev))
		return 0;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
		      (unsigned long)features);
		return -EINVAL;
	}

	return 0;
}

static int __open_metadata(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r < 0) {
		DMERR("couldn't read superblock");
		return r;
	}

	disk_super = dm_block_data(sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported",
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)pmd->data_block_size);
		r = -EINVAL;
		goto bad_unlock_sblock;
	}

	r = __check_incompat_features(disk_super, pmd);
	if (r < 0)
		goto bad_unlock_sblock;

	r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			       disk_super->metadata_space_map_root,
			       sizeof(disk_super->metadata_space_map_root),
			       &pmd->tm, &pmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed");
		goto bad_unlock_sblock;
	}

	pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
				       sizeof(disk_super->data_space_map_root));
	if (IS_ERR(pmd->data_sm)) {
		DMERR("sm_disk_open failed");
		r = PTR_ERR(pmd->data_sm);
		goto bad_cleanup_tm;
	}

	pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
	if (!pmd->nb_tm) {
		DMERR("could not create non-blocking clone tm");
		r = -ENOMEM;
		goto bad_cleanup_data_sm;
	}
	/*
	 * When opening the pool metadata, setting the roots here is
	 * redundant because __begin_transaction() will set them again.
	 * But the pool abort path really does need the last transaction's
	 * roots, to avoid accessing a broken btree.
	 */
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);

	__setup_btree_details(pmd);
	dm_bm_unlock(sblock);

	return 0;

bad_cleanup_data_sm:
	dm_sm_destroy(pmd->data_sm);
bad_cleanup_tm:
	dm_tm_destroy(pmd->tm);
	dm_sm_destroy(pmd->metadata_sm);
bad_unlock_sblock:
	dm_bm_unlock(sblock);

	return r;
}

static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
{
	int r, unformatted;

	r = __superblock_all_zeroes(pmd->bm, &unformatted);
	if (r)
		return r;

	if (unformatted)
		return format_device ? __format_metadata(pmd) : -EPERM;

	return __open_metadata(pmd);
}

static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
{
	int r;

	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					  THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(pmd->bm)) {
		DMERR("could not create block manager");
		r = PTR_ERR(pmd->bm);
		pmd->bm = NULL;
		return r;
	}

	r = __open_or_format_metadata(pmd, format_device);
	if (r) {
		dm_block_manager_destroy(pmd->bm);
		pmd->bm = NULL;
	}

	return r;
}

static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
					      bool destroy_bm)
{
	dm_sm_destroy(pmd->data_sm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_tm_destroy(pmd->nb_tm);
	dm_tm_destroy(pmd->tm);
	if (destroy_bm)
		dm_block_manager_destroy(pmd->bm);
}

static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	dm_bm_unlock(sblock);

	return 0;
}

static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;

		key = td->id;

		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;

		if (td->open_count)
			td->changed = false;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}

	return 0;
}

static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
	BUG_ON(!rwsem_is_locked(&pmd->root_lock));

	if (unlikely(!pmd->in_service))
		return 0;

	if (pmd->pre_commit_fn) {
		r = pmd->pre_commit_fn(pmd->pre_commit_context);
		if (r < 0) {
			DMERR("pre-commit callback failed");
			return r;
		}
	}

	r = __write_changed_details(pmd);
	if (r < 0)
		return r;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = save_sm_roots(pmd);
	if (r < 0)
		return r;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	copy_sm_roots(pmd, disk_super);

	return dm_tm_commit(pmd->tm, sblock);
}

static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
{
	int r;
	dm_block_t total;
	dm_block_t max_blocks = 4096;	/* 16M */

	r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
	if (r) {
		DMERR("could not get size of metadata device");
		pmd->metadata_reserve = max_blocks;
	} else
		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}

struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool format_device)
{
	int r;
	struct dm_pool_metadata *pmd;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);
	pmd->fail_io = false;
	pmd->in_service = false;
	pmd->bdev = bdev;
	pmd->data_block_size = data_block_size;
	pmd->pre_commit_fn = NULL;
	pmd->pre_commit_context = NULL;

	r = __create_persistent_data_objects(pmd, format_device);
	if (r) {
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = __begin_transaction(pmd);
	if (r < 0) {
		if (dm_pool_metadata_close(pmd) < 0)
			DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
		return ERR_PTR(r);
	}

	__set_metadata_reserve(pmd);

	return pmd;
}
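
/*
 * A minimal usage sketch of this interface (illustrative only, not part
 * of the driver; error handling elided).  Open the pool metadata
 * (formatting if empty), create thin device 0, commit the transaction
 * and close:
 *
 *	struct dm_pool_metadata *pmd;
 *
 *	pmd = dm_pool_metadata_open(bdev, data_block_size, true);
 *	if (!IS_ERR(pmd)) {
 *		dm_pool_create_thin(pmd, 0);
 *		dm_pool_commit_metadata(pmd);
 *		dm_pool_metadata_close(pmd);
 *	}
 */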
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned int open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);

	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}

	pmd_write_lock_in_core(pmd);
	if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
		r = __commit_transaction(pmd);
		if (r < 0)
			DMWARN("%s: __commit_transaction() failed, error = %d",
			       __func__, r);
	}
	pmd_write_unlock(pmd);
	if (!pmd->fail_io)
		__destroy_persistent_data_objects(pmd, true);

	kfree(pmd);
	return 0;
}

/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;

			td2->open_count++;
			*td = td2;
			return 0;
		}

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/*
		 * Create new device.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}

	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;

	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->aborted_with_changes = false;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

	return 0;
}

static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}

static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct dm_thin_device *td;
	__le64 value;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, NULL);
	if (!r)
		return -EEXIST;

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}

	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);

	return r;
}

int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __create_thin(pmd, dev);
	pmd_write_unlock(pmd);

	return r;
}

static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	int r;
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);
	if (r)
		return r;

	td->changed = true;
	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(td);

	return 0;
}

static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	__le64 value;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, NULL);
	if (!r)
		return -EEXIST;

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}

	pmd->time++;

	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;

	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);

	if (r)
		goto bad;

	return 0;

bad:
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}

int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __create_snap(pmd, dev, origin);
	pmd_write_unlock(pmd);

	return r;
}

static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;

	if (td->open_count > 1) {
		__close_device(td);
		return -EBUSY;
	}

	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;

	return 0;
}

int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __delete_device(pmd, dev);
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);

	if (pmd->fail_io)
		goto out;

	if (pmd->trans_id != current_id) {
		DMERR("mismatched transaction id");
		goto out;
	}

	pmd->trans_id = new_id;
	r = 0;

out:
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = pmd->trans_id;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}

static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * We commit to ensure the btree roots which we increment in a
	 * moment are up to date.
	 */
	r = __commit_transaction(pmd);
	if (r < 0) {
		DMWARN("%s: __commit_transaction() failed, error = %d",
		       __func__, r);
		return r;
	}

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;

	BUG_ON(!inc);

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		return -EBUSY;
	}

	/*
	 * Wipe the spacemap since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		return r;
	}

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
	return 0;
}

int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __reserve_metadata_snap(pmd);
	pmd_write_unlock(pmd);

	return r;
}

static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);

	dm_bm_unlock(sblock);

	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;

	disk_super = dm_block_data(copy);
	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	dm_tm_unlock(pmd->tm, copy);
	return 0;
}

int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __release_metadata_snap(pmd);
	pmd_write_unlock(pmd);

	return r;
}

static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	dm_bm_unlock(sblock);

	return 0;
}

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int r = -EINVAL;

	pmd_write_lock_in_core(pmd);
	if (!pmd->fail_io)
		r = __open_device(pmd, dev, 0, td);
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	pmd_write_lock_in_core(td->pmd);
	__close_device(td);
	pmd_write_unlock(td->pmd);

	return 0;
}

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}

/*
 * Check whether @time (of block creation) is older than @td's last snapshot.
 * If so then the associated block is shared with the last snapshot device.
 * Any block on a device created *after* the device last got snapshotted is
 * necessarily not shared.
 */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}

static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
				 struct dm_thin_lookup_result *result)
{
	uint64_t block_time = 0;
	dm_block_t exception_block;
	uint32_t exception_time;

	block_time = le64_to_cpu(value);
	unpack_block_time(block_time, &exception_block, &exception_time);
	result->block = exception_block;
	result->shared = __snapshotted_since(td, exception_time);
}

static int __find_block(struct dm_thin_device *td, dm_block_t block,
			int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };
	struct dm_btree_info *info;

	if (can_issue_io)
		info = &pmd->info;
	else
		info = &pmd->nb_info;

	r = dm_btree_lookup(info, pmd->root, keys, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}

int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_issue_io, struct dm_thin_lookup_result *result)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (pmd->fail_io) {
		up_read(&pmd->root_lock);
		return -EINVAL;
	}

	r = __find_block(td, block, can_issue_io, result);

	up_read(&pmd->root_lock);
	return r;
}

static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
				    dm_block_t *vblock,
				    struct dm_thin_lookup_result *result)
{
	int r;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
	if (!r)
		unpack_lookup_result(td, value, result);

	return r;
}

static int __find_mapped_range(struct dm_thin_device *td,
			       dm_block_t begin, dm_block_t end,
			       dm_block_t *thin_begin, dm_block_t *thin_end,
			       dm_block_t *pool_begin, bool *maybe_shared)
{
	int r;
	dm_block_t pool_end;
	struct dm_thin_lookup_result lookup;

	if (end < begin)
		return -ENODATA;

	r = __find_next_mapped_block(td, begin, &begin, &lookup);
	if (r)
		return r;

	if (begin >= end)
		return -ENODATA;

	*thin_begin = begin;
	*pool_begin = lookup.block;
	*maybe_shared = lookup.shared;

	begin++;
	pool_end = *pool_begin + 1;
	while (begin != end) {
		r = __find_block(td, begin, true, &lookup);
		if (r) {
			if (r == -ENODATA)
				break;
			else
				return r;
		}

		if ((lookup.block != pool_end) ||
		    (lookup.shared != *maybe_shared))
			break;

		pool_end++;
		begin++;
	}

	*thin_end = begin;
	return 0;
}
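
/*
 * For example, with thin blocks 5..7 mapped to pool blocks 90..92 (none
 * shared) and thin block 8 unmapped, scanning [5, 20) yields
 * thin_begin == 5, thin_end == 8, pool_begin == 90 and
 * maybe_shared == false.
 */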
  1333. int dm_thin_find_mapped_range(struct dm_thin_device *td,
  1334. dm_block_t begin, dm_block_t end,
  1335. dm_block_t *thin_begin, dm_block_t *thin_end,
  1336. dm_block_t *pool_begin, bool *maybe_shared)
  1337. {
  1338. int r = -EINVAL;
  1339. struct dm_pool_metadata *pmd = td->pmd;
  1340. down_read(&pmd->root_lock);
  1341. if (!pmd->fail_io) {
  1342. r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
  1343. pool_begin, maybe_shared);
  1344. }
  1345. up_read(&pmd->root_lock);
  1346. return r;
  1347. }
  1348. static int __insert(struct dm_thin_device *td, dm_block_t block,
  1349. dm_block_t data_block)
  1350. {
  1351. int r, inserted;
  1352. __le64 value;
  1353. struct dm_pool_metadata *pmd = td->pmd;
  1354. dm_block_t keys[2] = { td->id, block };
  1355. value = cpu_to_le64(pack_block_time(data_block, pmd->time));
  1356. __dm_bless_for_disk(&value);
  1357. r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
  1358. &pmd->root, &inserted);
  1359. if (r)
  1360. return r;
  1361. td->changed = true;
  1362. if (inserted)
  1363. td->mapped_blocks++;
  1364. return 0;
  1365. }
  1366. int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
  1367. dm_block_t data_block)
  1368. {
  1369. int r = -EINVAL;
  1370. pmd_write_lock(td->pmd);
  1371. if (!td->pmd->fail_io)
  1372. r = __insert(td, block, data_block);
  1373. pmd_write_unlock(td->pmd);
  1374. return r;
  1375. }
  1376. static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
  1377. {
  1378. int r;
  1379. unsigned int count, total_count = 0;
  1380. struct dm_pool_metadata *pmd = td->pmd;
  1381. dm_block_t keys[1] = { td->id };
  1382. __le64 value;
  1383. dm_block_t mapping_root;
  1384. /*
  1385. * Find the mapping tree
  1386. */
  1387. r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value);
  1388. if (r)
  1389. return r;
  1390. /*
  1391. * Remove from the mapping tree, taking care to inc the
  1392. * ref count so it doesn't get deleted.
  1393. */
  1394. mapping_root = le64_to_cpu(value);
  1395. dm_tm_inc(pmd->tm, mapping_root);
  1396. r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root);
  1397. if (r)
  1398. return r;
  1399. /*
  1400. * Remove leaves stops at the first unmapped entry, so we have to
  1401. * loop round finding mapped ranges.
  1402. */
  1403. while (begin < end) {
  1404. r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
  1405. if (r == -ENODATA)
  1406. break;
  1407. if (r)
  1408. return r;
  1409. if (begin >= end)
  1410. break;
  1411. r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
  1412. if (r)
  1413. return r;
  1414. total_count += count;
  1415. }
  1416. td->mapped_blocks -= total_count;
  1417. td->changed = true;
  1418. /*
  1419. * Reinsert the mapping tree.
  1420. */
  1421. value = cpu_to_le64(mapping_root);
  1422. __dm_bless_for_disk(&value);
  1423. return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
  1424. }
  1425. int dm_thin_remove_range(struct dm_thin_device *td,
  1426. dm_block_t begin, dm_block_t end)
  1427. {
  1428. int r = -EINVAL;
  1429. pmd_write_lock(td->pmd);
  1430. if (!td->pmd->fail_io)
  1431. r = __remove_range(td, begin, end);
  1432. pmd_write_unlock(td->pmd);
  1433. return r;
  1434. }
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
{
	int r = -EINVAL;
	uint32_t ref_count;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
		if (!r)
			*result = (ref_count > 1);
	}
	up_read(&pmd->root_lock);

	return r;
}

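/*
 * The next two helpers adjust data-block reference counts directly in
 * the data space map, over what appears to be the half-open range
 * [b, e), without touching the mapping btrees.
 */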
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = dm_sm_inc_blocks(pmd->data_sm, b, e);
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = dm_sm_dec_blocks(pmd->data_sm, b, e);
	pmd_write_unlock(pmd);

	return r;
}

bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
	bool r;

	down_read(&td->pmd->root_lock);
	r = td->changed;
	up_read(&td->pmd->root_lock);

	return r;
}

bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
{
	bool r = false;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->changed) {
			r = td->changed;
			break;
		}
	}
	up_read(&pmd->root_lock);

	return r;
}

bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
	bool r;

	down_read(&td->pmd->root_lock);
	r = td->aborted_with_changes;
	up_read(&td->pmd->root_lock);

	return r;
}

int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = dm_sm_new_block(pmd->data_sm, result);
	pmd_write_unlock(pmd);

	return r;
}

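/*
 * Note the lock variant below: pmd_write_lock_in_core() takes root_lock
 * without flagging the pool as in-service, so a periodic commit on an
 * otherwise idle pool does not by itself count as new activity.
 */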
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;

	/*
	 * Care is taken to not have commit be what
	 * triggers putting the thin-pool in-service.
	 */
	pmd_write_lock_in_core(pmd);
	if (pmd->fail_io)
		goto out;

	r = __commit_transaction(pmd);
	if (r < 0)
		goto out;

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	pmd_write_unlock(pmd);
	return r;
}

static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
{
	struct dm_thin_device *td;

	list_for_each_entry(td, &pmd->thin_devices, list)
		td->aborted_with_changes = td->changed;
}

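/*
 * Abort throws away the current transaction: tear down all the
 * persistent-data objects and reopen the metadata from the last
 * committed superblock.  The replacement block manager is created (and
 * the old one destroyed) outside root_lock; see the lock-ordering
 * comment below.
 */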
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;
	struct dm_block_manager *old_bm = NULL, *new_bm = NULL;

	/* fail_io is double-checked with pmd->root_lock held below */
	if (unlikely(pmd->fail_io))
		return r;

	/*
	 * Replacement block manager (new_bm) is created and old_bm destroyed outside of
	 * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
	 * shrinker associated with the block manager's bufio client vs pmd root_lock).
	 * - must take shrinker_rwsem without holding pmd->root_lock
	 */
	new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					 THIN_MAX_CONCURRENT_LOCKS);

	pmd_write_lock(pmd);
	if (pmd->fail_io) {
		pmd_write_unlock(pmd);
		goto out;
	}

	__set_abort_with_changes_flags(pmd);
	__destroy_persistent_data_objects(pmd, false);
	old_bm = pmd->bm;
	if (IS_ERR(new_bm)) {
		DMERR("could not create block manager during abort");
		pmd->bm = NULL;
		r = PTR_ERR(new_bm);
		goto out_unlock;
	}

	pmd->bm = new_bm;
	r = __open_or_format_metadata(pmd, false);
	if (r) {
		pmd->bm = NULL;
		goto out_unlock;
	}
	new_bm = NULL;
out_unlock:
	if (r)
		pmd->fail_io = true;
	pmd_write_unlock(pmd);
	dm_block_manager_destroy(old_bm);
out:
	if (new_bm && !IS_ERR(new_bm))
		dm_block_manager_destroy(new_bm);

	return r;
}

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

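/*
 * Unlike the raw data-device count above, the metadata count reported
 * here has pmd->metadata_reserve subtracted, so callers only ever see
 * blocks they are actually allowed to consume.
 */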
int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_free(pmd->metadata_sm, result);

	if (!r) {
		if (*result < pmd->metadata_reserve)
			*result = 0;
		else
			*result -= pmd->metadata_reserve;
	}
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r = -EINVAL;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io) {
		*result = td->mapped_blocks;
		r = 0;
	}
	up_read(&pmd->root_lock);

	return r;
}

static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	int r;
	__le64 value_le;
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (r)
		return r;

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	int r = -EINVAL;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		r = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return r;
}

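/*
 * Space maps can only grow.  A resize to the current size is a no-op;
 * an attempted shrink fails with -EINVAL, since nothing here proves
 * that blocks beyond the new end are unreferenced.
 */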
static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

	return dm_sm_extend(sm, new_count - old_count);
}

int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io)
		r = __resize_space_map(pmd->data_sm, new_count);
	pmd_write_unlock(pmd);

	return r;
}

int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r = -EINVAL;

	pmd_write_lock(pmd);
	if (!pmd->fail_io) {
		r = __resize_space_map(pmd->metadata_sm, new_count);
		if (!r)
			__set_metadata_reserve(pmd);
	}
	pmd_write_unlock(pmd);

	return r;
}

void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	dm_bm_set_read_only(pmd->bm);
	pmd_write_unlock(pmd);
}

void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
	pmd_write_lock_in_core(pmd);
	dm_bm_set_read_write(pmd->bm);
	pmd_write_unlock(pmd);
}

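/*
 * Register a low-space watermark on the metadata space map: fn(context)
 * is invoked by the space map once the number of free metadata blocks
 * drops to the given threshold.
 */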
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
					dm_block_t threshold,
					dm_sm_threshold_fn fn,
					void *context)
{
	int r = -EINVAL;

	pmd_write_lock_in_core(pmd);
	if (!pmd->fail_io) {
		r = dm_sm_register_threshold_callback(pmd->metadata_sm,
						      threshold, fn, context);
	}
	pmd_write_unlock(pmd);

	return r;
}

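/*
 * The pre-commit callback runs as part of __commit_transaction(), before
 * the new superblock is written.  The pool target uses it (as far as I
 * can tell) to flush the data device so that committed metadata never
 * references unflushed data; this file only guarantees the pre-commit
 * ordering itself.
 */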
void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd,
					  dm_pool_pre_commit_fn fn,
					  void *context)
{
	pmd_write_lock_in_core(pmd);
	pmd->pre_commit_fn = fn;
	pmd->pre_commit_context = context;
	pmd_write_unlock(pmd);
}

int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
{
	int r = -EINVAL;
	struct dm_block *sblock;
	struct thin_disk_superblock *disk_super;

	pmd_write_lock(pmd);
	if (pmd->fail_io)
		goto out;

	pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;

	r = superblock_lock(pmd, &sblock);
	if (r) {
		DMERR("couldn't lock superblock");
		goto out;
	}

	disk_super = dm_block_data(sblock);
	disk_super->flags = cpu_to_le32(pmd->flags);

	dm_bm_unlock(sblock);
out:
	pmd_write_unlock(pmd);
	return r;
}

bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
{
	bool needs_check;

	down_read(&pmd->root_lock);
	needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
	up_read(&pmd->root_lock);

	return needs_check;
}

void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
	down_read(&pmd->root_lock);
	if (!pmd->fail_io)
		dm_tm_issue_prefetches(pmd->tm);
	up_read(&pmd->root_lock);
}