/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"
#include "dm-btree-internal.h"
#include "dm-persistent-data-internal.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};

/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779

static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b,
					size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b,
			   size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};

/*----------------------------------------------------------------*/

#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT 5
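
/*
 * Each reference count is packed into two bits, so one 64-bit word holds
 * 32 entries.  Counts 0-2 are stored literally; the value 3 is a sentinel
 * meaning the real count is larger and lives in the overflow btree, keyed
 * by block (see sm_ll_lookup() below).
 */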

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}
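
/*
 * A worked example of the carry trick above (editorial note, for
 * clarity): WORD_MASK_HIGH has the high bit of every 2-bit entry set,
 * and the "+ 1" seeds a carry into the lowest entry.  Every non-zero
 * entry propagates the carry upwards; a zero (free) entry absorbs it
 * and is left with its high mask bit set where ~bits is also set.
 * E.g. a 4-bit slice holding the entries 01,00:
 *
 *	bits = 0100b
 *	mask = (0100b + 1010b + 1b) & 1010b = 1010b
 *	~bits & mask = 1011b & 1010b != 0	=> word not full
 *
 * So the function returns true only when no entry in the word is zero.
 */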

static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned int hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);

	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}

static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
			unsigned int *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    dm_bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}

/*----------------------------------------------------------------*/

static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	memset(ll, 0, sizeof(struct ll_disk));

	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
				ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}
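
/*
 * For scale, a worked example (editorial note, assuming the usual 4 KiB
 * metadata block and the 16-byte struct disk_bitmap_header): with
 * ENTRIES_PER_BYTE = 4 two-bit entries per byte, one bitmap block
 * indexes (4096 - 16) * 4 = 16320 data blocks.
 */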

int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned int old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}

int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	if (b >= ll->nr_blocks) {
		DMERR_LIMIT("metadata block out of bounds");
		return -EINVAL;
	}

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}

static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}

int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);
	if (end == 0)
		end = ll->entries_per_block;

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned int position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}
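
/*
 * The function below finds a block that is free in both the committed
 * transaction (old_ll) and the one being built (new_ll).  Editorial
 * note on the rationale: allocating only such blocks means a crash
 * before the next commit can never have overwritten metadata that the
 * committed transaction still references.
 */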
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
				 dm_block_t begin, dm_block_t end, dm_block_t *b)
{
	int r;
	uint32_t count;

	do {
		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
		if (r)
			break;

		/* double check this block wasn't used in the old transaction */
		if (*b >= old_ll->nr_blocks)
			count = 0;
		else {
			r = sm_ll_lookup(old_ll, *b, &count);
			if (r)
				break;

			if (count)
				begin = *b + 1;
		}
	} while (count);

	return r;
}

/*----------------------------------------------------------------*/

int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, int32_t *nr_allocations)
{
	int r;
	uint32_t bit, old;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));
	bm_le = dm_bitmap_data(nb);

	old = sm_lookup_bitmap(bm_le, bit);
	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);
		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}
	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*nr_allocations = 1;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*nr_allocations = -1;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*nr_allocations = 0;

	return ll->save_ie(ll, index, &ie_disk);
}
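
/*
 * Editorial aside: nr_free and none_free_before in the index entry are
 * allocation hints.  nr_free lets sm_ll_find_free_block() skip full
 * bitmaps outright, and none_free_before is a lower bound on the first
 * free entry, which is why every zero <-> non-zero transition above has
 * to keep them in step.
 */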

/*----------------------------------------------------------------*/

/*
 * Holds useful intermediate results for the range based inc and dec
 * operations.
 */
struct inc_context {
	struct disk_index_entry ie_disk;
	struct dm_block *bitmap_block;
	void *bitmap;

	struct dm_block *overflow_leaf;
};

static inline void init_inc_context(struct inc_context *ic)
{
	ic->bitmap_block = NULL;
	ic->bitmap = NULL;
	ic->overflow_leaf = NULL;
}

static inline void exit_inc_context(struct ll_disk *ll, struct inc_context *ic)
{
	if (ic->bitmap_block)
		dm_tm_unlock(ll->tm, ic->bitmap_block);
	if (ic->overflow_leaf)
		dm_tm_unlock(ll->tm, ic->overflow_leaf);
}

static inline void reset_inc_context(struct ll_disk *ll, struct inc_context *ic)
{
	exit_inc_context(ll, ic);
	init_inc_context(ic);
}

/*
 * Confirms a btree node contains a particular key at an index.
 */
static bool contains_key(struct btree_node *n, uint64_t key, int index)
{
	return index >= 0 &&
		index < le32_to_cpu(n->header.nr_entries) &&
		le64_to_cpu(n->keys[index]) == key;
}

static int __sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_context *ic)
{
	int r;
	int index;
	struct btree_node *n;
	__le32 *v_ptr;
	uint32_t rc;

	/*
	 * bitmap_block needs to be unlocked because getting the
	 * overflow_leaf may need to allocate, and thus use the space map.
	 */
	reset_inc_context(ll, ic);

	r = btree_get_overwrite_leaf(&ll->ref_count_info, ll->ref_count_root,
				     b, &index, &ll->ref_count_root, &ic->overflow_leaf);
	if (r < 0)
		return r;

	n = dm_block_data(ic->overflow_leaf);

	if (!contains_key(n, b, index)) {
		DMERR("overflow btree is missing an entry");
		return -EINVAL;
	}

	v_ptr = value_ptr(n, index);
	rc = le32_to_cpu(*v_ptr) + 1;
	*v_ptr = cpu_to_le32(rc);

	return 0;
}

static int sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_context *ic)
{
	int index;
	struct btree_node *n;
	__le32 *v_ptr;
	uint32_t rc;

	/*
	 * Do we already have the correct overflow leaf?
	 */
	if (ic->overflow_leaf) {
		n = dm_block_data(ic->overflow_leaf);
		index = lower_bound(n, b);
		if (contains_key(n, b, index)) {
			v_ptr = value_ptr(n, index);
			rc = le32_to_cpu(*v_ptr) + 1;
			*v_ptr = cpu_to_le32(rc);

			return 0;
		}
	}

	return __sm_ll_inc_overflow(ll, b, ic);
}

static inline int shadow_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
	int r, inc;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ic->ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &ic->bitmap_block, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ic->ie_disk.blocknr = cpu_to_le64(dm_block_location(ic->bitmap_block));
	ic->bitmap = dm_bitmap_data(ic->bitmap_block);
	return 0;
}

/*
 * Once shadow_bitmap has been called, which always happens at the start
 * of inc/dec, we can reopen the bitmap with a simple write lock, rather
 * than re-calling dm_tm_shadow_block().
 */
static inline int ensure_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
	if (!ic->bitmap_block) {
		int r = dm_bm_write_lock(dm_tm_get_bm(ll->tm), le64_to_cpu(ic->ie_disk.blocknr),
					 &dm_sm_bitmap_validator, &ic->bitmap_block);
		if (r) {
			DMERR("unable to re-get write lock for bitmap");
			return r;
		}
		ic->bitmap = dm_bitmap_data(ic->bitmap_block);
	}

	return 0;
}

/*
 * Loops round incrementing entries in a single bitmap.
 */
static inline int sm_ll_inc_bitmap(struct ll_disk *ll, dm_block_t b,
				   uint32_t bit, uint32_t bit_end,
				   int32_t *nr_allocations, dm_block_t *new_b,
				   struct inc_context *ic)
{
	int r;
	__le32 le_rc;
	uint32_t old;

	for (; bit != bit_end; bit++, b++) {
		/*
		 * We only need to drop the bitmap if we need to find a new btree
		 * leaf for the overflow.  So if it was dropped last iteration,
		 * we now re-get it.
		 */
		r = ensure_bitmap(ll, ic);
		if (r)
			return r;

		old = sm_lookup_bitmap(ic->bitmap, bit);
		switch (old) {
		case 0:
			/* inc bitmap, adjust nr_allocated */
			sm_set_bitmap(ic->bitmap, bit, 1);
			(*nr_allocations)++;
			ll->nr_allocated++;
			le32_add_cpu(&ic->ie_disk.nr_free, -1);
			if (le32_to_cpu(ic->ie_disk.none_free_before) == bit)
				ic->ie_disk.none_free_before = cpu_to_le32(bit + 1);
			break;

		case 1:
			/* inc bitmap */
			sm_set_bitmap(ic->bitmap, bit, 2);
			break;

		case 2:
			/* inc bitmap and insert into overflow */
			sm_set_bitmap(ic->bitmap, bit, 3);
			reset_inc_context(ll, ic);

			le_rc = cpu_to_le32(3);
			__dm_bless_for_disk(&le_rc);
			r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
					    &b, &le_rc, &ll->ref_count_root);
			if (r < 0) {
				DMERR("ref count insert failed");
				return r;
			}
			break;

		default:
			/*
			 * inc within the overflow tree only.
			 */
			r = sm_ll_inc_overflow(ll, b, ic);
			if (r < 0)
				return r;
		}
	}

	*new_b = b;
	return 0;
}

/*
 * Finds a bitmap that contains entries in the block range, and increments
 * them.
 */
static int __sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
		       int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	struct inc_context ic;
	uint32_t bit, bit_end;
	dm_block_t index = b;

	init_inc_context(&ic);

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ic.ie_disk);
	if (r < 0)
		return r;

	r = shadow_bitmap(ll, &ic);
	if (r)
		return r;

	bit_end = min(bit + (e - b), (dm_block_t) ll->entries_per_block);
	r = sm_ll_inc_bitmap(ll, b, bit, bit_end, nr_allocations, new_b, &ic);

	exit_inc_context(ll, &ic);

	if (r)
		return r;

	return ll->save_ie(ll, index, &ic.ie_disk);
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
	      int32_t *nr_allocations)
{
	*nr_allocations = 0;
	while (b != e) {
		int r = __sm_ll_inc(ll, b, e, nr_allocations, &b);

		if (r)
			return r;
	}

	return 0;
}
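
/*
 * Illustrative usage (editorial sketch; the block numbers are made up,
 * and the real callers are the sm-disk and sm-metadata wrappers):
 * incrementing the counts for the half-open range of blocks [100, 200)
 * looks like
 *
 *	int32_t nr_allocations;
 *	int r = sm_ll_inc(ll, 100, 200, &nr_allocations);
 *
 * On success, nr_allocations holds the number of entries that went from
 * zero to non-zero.
 */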

/*----------------------------------------------------------------*/

static int __sm_ll_del_overflow(struct ll_disk *ll, dm_block_t b,
				struct inc_context *ic)
{
	reset_inc_context(ll, ic);
	return dm_btree_remove(&ll->ref_count_info, ll->ref_count_root,
			       &b, &ll->ref_count_root);
}

static int __sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
				struct inc_context *ic, uint32_t *old_rc)
{
	int r;
	int index = -1;
	struct btree_node *n;
	__le32 *v_ptr;
	uint32_t rc;

	reset_inc_context(ll, ic);
	r = btree_get_overwrite_leaf(&ll->ref_count_info, ll->ref_count_root,
				     b, &index, &ll->ref_count_root, &ic->overflow_leaf);
	if (r < 0)
		return r;

	n = dm_block_data(ic->overflow_leaf);

	if (!contains_key(n, b, index)) {
		DMERR("overflow btree is missing an entry");
		return -EINVAL;
	}

	v_ptr = value_ptr(n, index);
	rc = le32_to_cpu(*v_ptr);
	*old_rc = rc;

	if (rc == 3) {
		return __sm_ll_del_overflow(ll, b, ic);
	} else {
		rc--;
		*v_ptr = cpu_to_le32(rc);
		return 0;
	}
}

static int sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
			      struct inc_context *ic, uint32_t *old_rc)
{
	/*
	 * Do we already have the correct overflow leaf?
	 */
	if (ic->overflow_leaf) {
		int index;
		struct btree_node *n;
		__le32 *v_ptr;
		uint32_t rc;

		n = dm_block_data(ic->overflow_leaf);
		index = lower_bound(n, b);
		if (contains_key(n, b, index)) {
			v_ptr = value_ptr(n, index);
			rc = le32_to_cpu(*v_ptr);
			*old_rc = rc;

			if (rc > 3) {
				rc--;
				*v_ptr = cpu_to_le32(rc);
				return 0;
			} else {
				return __sm_ll_del_overflow(ll, b, ic);
			}
		}
	}

	return __sm_ll_dec_overflow(ll, b, ic, old_rc);
}

/*
 * Loops round decrementing entries in a single bitmap.
 */
static inline int sm_ll_dec_bitmap(struct ll_disk *ll, dm_block_t b,
				   uint32_t bit, uint32_t bit_end,
				   struct inc_context *ic,
				   int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	uint32_t old;

	for (; bit != bit_end; bit++, b++) {
		/*
		 * We only need to drop the bitmap if we need to find a new btree
		 * leaf for the overflow.  So if it was dropped last iteration,
		 * we now re-get it.
		 */
		r = ensure_bitmap(ll, ic);
		if (r)
			return r;

		old = sm_lookup_bitmap(ic->bitmap, bit);
		switch (old) {
		case 0:
			DMERR("unable to decrement block");
			return -EINVAL;

		case 1:
			/* dec bitmap */
			sm_set_bitmap(ic->bitmap, bit, 0);
			(*nr_allocations)--;
			ll->nr_allocated--;
			le32_add_cpu(&ic->ie_disk.nr_free, 1);
			ic->ie_disk.none_free_before =
				cpu_to_le32(min(le32_to_cpu(ic->ie_disk.none_free_before), bit));
			break;

		case 2:
			/* dec bitmap */
			sm_set_bitmap(ic->bitmap, bit, 1);
			break;

		case 3:
			r = sm_ll_dec_overflow(ll, b, ic, &old);
			if (r < 0)
				return r;

			if (old == 3) {
				r = ensure_bitmap(ll, ic);
				if (r)
					return r;

				sm_set_bitmap(ic->bitmap, bit, 2);
			}
			break;
		}
	}

	*new_b = b;
	return 0;
}

static int __sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
		       int32_t *nr_allocations, dm_block_t *new_b)
{
	int r;
	uint32_t bit, bit_end;
	struct inc_context ic;
	dm_block_t index = b;

	init_inc_context(&ic);

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ic.ie_disk);
	if (r < 0)
		return r;

	r = shadow_bitmap(ll, &ic);
	if (r)
		return r;

	bit_end = min(bit + (e - b), (dm_block_t) ll->entries_per_block);
	r = sm_ll_dec_bitmap(ll, b, bit, bit_end, &ic, nr_allocations, new_b);
	exit_inc_context(ll, &ic);

	if (r)
		return r;

	return ll->save_ie(ll, index, &ic.ie_disk);
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
	      int32_t *nr_allocations)
{
	*nr_allocations = 0;
	while (b != e) {
		int r = __sm_ll_dec(ll, b, e, nr_allocations, &b);

		if (r)
			return r;
	}

	return 0;
}

/*----------------------------------------------------------------*/

int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/

static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root smr;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	/*
	 * We don't know the alignment of the root_le buffer, so need to
	 * copy into a new structure.
	 */
	memcpy(&smr, root_le, sizeof(smr));

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr.ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/

static inline int ie_cache_writeback(struct ll_disk *ll, struct ie_cache *iec)
{
	iec->dirty = false;
	__dm_bless_for_disk(iec->ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &iec->index, &iec->ie, &ll->bitmap_root);
}

static inline unsigned int hash_index(dm_block_t index)
{
	return dm_hash_block(index, IE_CACHE_MASK);
}
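
/*
 * Editorial note on the helpers below: ie_cache is a small,
 * direct-mapped, write-back cache of index entries keyed by
 * hash_index().  A dirty entry is only written back to the bitmap
 * btree when it is evicted by a colliding index, or at commit time
 * via disk_ll_commit().
 */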

static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	int r;
	unsigned int h = hash_index(index);
	struct ie_cache *iec = ll->ie_cache + h;

	if (iec->valid) {
		if (iec->index == index) {
			memcpy(ie, &iec->ie, sizeof(*ie));
			return 0;
		}

		if (iec->dirty) {
			r = ie_cache_writeback(ll, iec);
			if (r)
				return r;
		}
	}

	r = dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
	if (!r) {
		iec->valid = true;
		iec->dirty = false;
		iec->index = index;
		memcpy(&iec->ie, ie, sizeof(*ie));
	}

	return r;
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	int r;
	unsigned int h = hash_index(index);
	struct ie_cache *iec = ll->ie_cache + h;

	ll->bitmap_index_changed = true;
	if (iec->valid) {
		if (iec->index == index) {
			memcpy(&iec->ie, ie, sizeof(*ie));
			iec->dirty = true;
			return 0;
		}

		if (iec->dirty) {
			r = ie_cache_writeback(ll, iec);
			if (r)
				return r;
		}
	}

	iec->valid = true;
	iec->dirty = true;
	iec->index = index;
	memcpy(&iec->ie, ie, sizeof(*ie));
	return 0;
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	unsigned int i;

	for (i = 0; i < IE_CACHE_SIZE; i++) {
		struct ie_cache *iec = ll->ie_cache + i;

		iec->valid = false;
		iec->dirty = false;
	}

	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}

static int disk_ll_commit(struct ll_disk *ll)
{
	int r = 0;
	unsigned int i;

	for (i = 0; i < IE_CACHE_SIZE; i++) {
		struct ie_cache *iec = ll->ie_cache + i;

		if (iec->valid && iec->dirty)
			r = ie_cache_writeback(ll, iec);
	}

	return r;
}

int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/