tree-mod-log.c

// SPDX-License-Identifier: GPL-2.0

#include "tree-mod-log.h"
#include "disk-io.h"

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum btrfs_mod_log_op op;

	/*
	 * This is used for BTRFS_MOD_LOG_KEY_* and BTRFS_MOD_LOG_MOVE_KEYS
	 * operations.
	 */
	int slot;

	/* This is used for BTRFS_MOD_LOG_KEY_* and BTRFS_MOD_LOG_ROOT_REPLACE. */
	u64 generation;

	/* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it must set elem->seq to zero before calling
 * btrfs_get_tree_mod_seq().
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct btrfs_seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
		set_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
	}
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct tree_mod_elem *tm;
	u64 min_seq = BTRFS_SEQ_LAST;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	write_lock(&fs_info->tree_mod_log_lock);
	list_del(&elem->list);
	elem->seq = 0;

	if (list_empty(&fs_info->tree_mod_seq_list)) {
		clear_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
	} else {
		struct btrfs_seq_list *first;

		first = list_first_entry(&fs_info->tree_mod_seq_list,
					 struct btrfs_seq_list, list);
		if (seq_putting > first->seq) {
			/*
			 * Blocker with lower sequence number exists, we cannot
			 * remove anything from the log.
			 */
			write_unlock(&fs_info->tree_mod_log_lock);
			return;
		}
		min_seq = first->seq;
	}

	/*
	 * Anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq >= min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}

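/*
 * Illustrative pairing of the two functions above (a sketch, not code from
 * this file; BTRFS_SEQ_LIST_INIT is the on-stack initializer used by in-tree
 * callers such as the backref walking code):
 *
 *	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... while the blocker is held, tree modifications are logged and
 *	... elem.seq can be passed as time_seq to the rewind helpers below ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */
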
/*
 * Key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node for root
 * replace operations, or the logical address of the affected block for all
 * other operations.
 */
static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info,
					struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	lockdep_assert_held_write(&fs_info->tree_mod_log_lock);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns true if it can. Otherwise, it
 * returns false with the tree_mod_log_lock acquired. The caller must hold the
 * lock until all tree mod log insertions are recorded in the rb tree, and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline bool tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				     struct extent_buffer *eb)
{
	if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
		return true;
	if (eb && btrfs_header_level(eb) == 0)
		return true;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return true;
	}

	return false;
}

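/*
 * Illustrative locking contract for tree_mod_dont_log(), a sketch of the
 * pattern every insertion function below follows:
 *
 *	if (tree_mod_dont_log(fs_info, eb))
 *		return 0;		(the lock was not kept)
 *	... fs_info->tree_mod_log_lock is write locked here ...
 *	ret = tree_mod_log_insert(fs_info, tm);
 *	write_unlock(&fs_info->tree_mod_log_lock);
 */
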
/* Similar to tree_mod_dont_log(), but doesn't acquire any locks. */
static inline bool tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				     struct extent_buffer *eb)
{
	if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags))
		return false;
	if (eb && btrfs_header_level(eb) == 0)
		return false;

	return true;
}

static struct tree_mod_elem *alloc_tree_mod_elem(struct extent_buffer *eb,
						 int slot,
						 enum btrfs_mod_log_op op,
						 gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != BTRFS_MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

int btrfs_tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
				  enum btrfs_mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

int btrfs_tree_mod_log_insert_move(struct extent_buffer *eb,
				   int dst_slot, int src_slot,
				   int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	bool locked = false;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = BTRFS_MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
				BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING,
				GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = true;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				       struct tree_mod_elem **tm_list,
				       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root,
				   struct extent_buffer *new_root,
				   bool log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = BTRFS_MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *__tree_mod_log_search(struct btrfs_fs_info *fs_info,
						   u64 start, u64 min_seq,
						   bool smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* We want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* We want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * This returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). Any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info,
							u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, true);
}

/*
 * This returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). Any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info,
						 u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, false);
}

int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst,
			       struct extent_buffer *src,
			       unsigned long dst_offset,
			       unsigned long src_offset,
			       int nr_items)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	bool locked = false;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
						     BTRFS_MOD_LOG_KEY_REMOVE,
						     GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
						     BTRFS_MOD_LOG_KEY_ADD,
						     GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = true;

	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

/*
 * Returns the logical address of the oldest predecessor of the given root.
 * Entries older than time_seq are ignored.
 */
static struct tree_mod_elem *tree_mod_log_oldest_root(struct extent_buffer *eb_root,
						      u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	bool looped = false;

	if (!time_seq)
		return NULL;

	/*
	 * The very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). This has the logical address
	 * of the *new* root, making it the very first operation that's logged
	 * for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * If there are no tree operations for the oldest root, we
		 * simply return it. This should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * If there's an operation that's not a root replacement, we
		 * found the oldest version of our root. Normally, we'll find a
		 * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != BTRFS_MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = true;
	}

	/* If there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. Then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb,
				u64 time_seq,
				struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * All the operations are recorded with the operator used for
		 * the modification. As we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			fallthrough;
		case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case BTRFS_MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case BTRFS_MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case BTRFS_MOD_LOG_KEY_ADD:
			/* If a move operation is needed it's in the log. */
			n--;
			break;
		case BTRFS_MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case BTRFS_MOD_LOG_ROOT_REPLACE:
			/*
			 * This operation is special. For roots, this must be
			 * handled explicitly before rewinding.
			 * For non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. In the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. We simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info,
						struct btrfs_path *path,
						struct extent_buffer *eb,
						u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_tree_read_unlock(eb);
	free_extent_buffer(eb);

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
				       eb_rewin, btrfs_header_level(eb_rewin));
	btrfs_tree_read_lock(eb_rewin);
	tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}

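/*
 * Illustrative caller pattern for btrfs_tree_mod_log_rewind() (a sketch; the
 * in-tree user is the time-travel search path in ctree.c):
 *
 *	eb = btrfs_tree_mod_log_rewind(fs_info, path, eb, time_seq);
 *	if (!eb)
 *		return -ENOMEM;	(the input buffer was already unlocked and freed)
 *	... on success 'eb' is read locked, whether or not a rewind happened ...
 */
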
/*
 * Rewind the state of @root's root node to the given @time_seq value.
 * If there are no changes, the current root node is returned. If anything
 * changed in between, there's a fresh buffer allocated on which the rewind
 * operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	u64 eb_root_owner = 0;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == BTRFS_MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, root->root_key.objectid,
				      0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			struct tree_mod_elem *tm2;

			btrfs_tree_read_lock(old);
			eb = btrfs_clone_extent_buffer(old);
			/*
			 * After the lookup for the most recent tree mod operation
			 * above and before we locked and cloned the extent buffer
			 * 'old', a new tree mod log operation may have been added.
			 * So lookup for a more recent one to make sure the number
			 * of mod log operations we replay is consistent with the
			 * number of items we have in the cloned extent buffer,
			 * otherwise we can hit a BUG_ON when rewinding the extent
			 * buffer.
			 */
			tm2 = tree_mod_log_search(fs_info, logical, time_seq);
			btrfs_tree_read_unlock(old);
			free_extent_buffer(old);
			ASSERT(tm2);
			ASSERT(tm2 == tm || tm2->seq > tm->seq);
			if (!tm2 || tm2->seq < tm->seq) {
				free_extent_buffer(eb);
				return NULL;
			}
			tm = tm2;
		}
	} else if (old_root) {
		eb_root_owner = btrfs_header_owner(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;

	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, eb_root_owner);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
				       btrfs_header_level(eb));
	btrfs_tree_read_lock(eb);
	if (tm)
		tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}

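/*
 * Illustrative time-travel lookup combining the helpers above (a sketch,
 * completing the pinning example shown after btrfs_put_tree_mod_seq()):
 *
 *	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
 *	struct extent_buffer *eb;
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	eb = btrfs_get_old_root(root, elem.seq);
 *	if (eb) {
 *		... the rewound root is read locked here ...
 *		btrfs_tree_read_unlock(eb);
 *		free_extent_buffer(eb);
 *	}
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */
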
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == BTRFS_MOD_LOG_ROOT_REPLACE)
		level = tm->old_root.level;
	else
		level = btrfs_header_level(eb_root);

	free_extent_buffer(eb_root);

	return level;
}

/*
 * Return the lowest sequence number in the tree modification log.
 *
 * Return the sequence number of the oldest tree modification log user, which
 * corresponds to the lowest sequence number of all existing users. If there are
 * no users it returns 0.
 */
u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info)
{
	u64 ret = 0;

	read_lock(&fs_info->tree_mod_log_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct btrfs_seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct btrfs_seq_list, list);
		ret = elem->seq;
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return ret;
}

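/*
 * Illustrative use of btrfs_tree_mod_log_lowest_seq() (a sketch mirroring the
 * cleanup rule in btrfs_put_tree_mod_seq() above, where elements with
 * tm->seq < min_seq are erased):
 *
 *	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 *
 *	if (min_seq == 0 || seq < min_seq)
 *		... no tree mod log user can still look this far back ...
 */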