// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
static struct kmem_cache *fsync_entry_slab;

#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

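/*
 * Return true if there is still room, in both free blocks and the
 * roll-forward node block budget, to keep appending fsynced data that
 * would have to be replayed by roll forward recovery.
 */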
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;

	if (NM_I(sbi)->max_rf_node_blocks &&
		percpu_counter_sum_positive(&sbi->rf_node_block_count) >=
		NM_I(sbi)->max_rf_node_blocks)
		return false;
	return true;
}

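/* Look up the fsync_inode_entry for @ino in @head; NULL if it is not listed. */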
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
							nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

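/*
 * Grab the in-memory inode for @ino, initialize its quota state and
 * append a new fsync_inode_entry for it to @head.
 */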
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = f2fs_dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab,
					GFP_F2FS_ZERO, true, NULL);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}

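/*
 * Release one fsync_inode_entry.  When @drop is set, the inode is marked
 * as already synced so it will not be recovered.
 */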
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}

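/*
 * Build an f2fs_filename from the name stored in the on-disk inode so the
 * dentry can be re-added during recovery.  Encrypted and casefolded
 * directories need special handling for the name hash.
 */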
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
		/*
		 * In this case the hash isn't computable without the key, so it
		 * was saved on-disk.
		 */
		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
			return -EINVAL;
		fname->hash = get_unaligned((f2fs_hash_t *)
				&raw_inode->i_name[fname->disk_name.len]);
	} else if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#if IS_ENABLED(CONFIG_UNICODE)
		/* Case-sensitive match is fine for recovery */
		kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}

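/*
 * Re-link the recovered inode into its parent directory, deleting any
 * stale entry that still occupies the same name.
 */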
static int recover_dentry(struct inode *inode, struct page *ipage,
			  struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
					pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = f2fs_dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

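/*
 * If the recovered uid/gid differ from the in-memory inode, transfer the
 * quota charges to the new owner before the ids are overwritten.
 */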
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_vfsuid = VFSUIDT_INIT(make_kuid(inode->i_sb->s_user_ns, i_uid));
	attr.ia_vfsgid = VFSGIDT_INIT(make_kgid(inode->i_sb->s_user_ns, i_gid));

	if (!vfsuid_eq(attr.ia_vfsuid, i_uid_into_vfsuid(&init_user_ns, inode)))
		attr.ia_valid |= ATTR_UID;
	if (!vfsgid_eq(attr.ia_vfsgid, i_gid_into_vfsgid(&init_user_ns, inode)))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(&init_user_ns, inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}

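/* Propagate the pin-file and inline-data-exist flags from the raw inode. */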
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}

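/*
 * Copy the metadata kept in the on-disk inode image (mode, ownership,
 * project id, size, timestamps and flags) into the in-memory inode.
 */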
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}

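/*
 * Tune the readahead window while walking the node chain: grow it while the
 * chain stays physically contiguous, shrink it when the next block jumps to
 * a non segment-aligned address.
 */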
static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
				unsigned int ra_blocks, unsigned int blkaddr,
				unsigned int next_blkaddr)
{
	if (blkaddr + 1 == next_blkaddr)
		ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
							ra_blocks * 2);
	else if (next_blkaddr % sbi->blocks_per_seg)
		ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS,
							ra_blocks / 2);
	return ra_blocks;
}

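/*
 * Step #1 of roll-forward recovery: walk the warm node chain written after
 * the last checkpoint and collect every inode that has fsynced dnodes.
 */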
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
						next_blkaddr_of_node(page));

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
	}
	return err;
}

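/* Tear down the whole fsync inode list built by find_fsync_dnodes(). */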
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}

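/*
 * The destination block of a recovered index may still be referenced by an
 * older dnode.  Find that previous owner through the segment summary and
 * truncate the stale index so @blkaddr can be reused safely.
 */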
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
				block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset, ofs_in_node, max_addrs;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	ofs_in_node = le16_to_cpu(sum.ofs_in_node);

	max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);
	if (ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%lu, nid:%u, max:%u",
			 ofs_in_node, dn->inode->i_ino, nid, max_addrs);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUMMARY);
		return -EFSCORRUPTED;
	}

	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = ofs_in_node;
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = ofs_in_node;
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = f2fs_dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

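/*
 * Replay one fsynced dnode page: recover xattrs and inline data first, then
 * walk its block addresses and rewrite every index whose destination differs
 * from what is currently mapped.
 */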
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					memalloc_retry_wait(GFP_NOFS);
					goto retry_prev;
				}
				goto err;
			}

			if (f2fs_is_valid_blkaddr(sbi, dest,
					DATA_GENERIC_ENHANCE_UPDATE)) {
				f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
					 dest, inode->i_ino, dn.ofs_in_node);
				err = -EFSCORRUPTED;
				f2fs_handle_error(sbi,
						ERROR_INVALID_BLKADDR);
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}

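/*
 * Step #2 of roll-forward recovery: walk the node chain once more and, for
 * every collected inode, replay inode metadata, dentries and data indices.
 */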
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;
	unsigned int ra_blocks = RECOVERY_MAX_RA_BLOCKS;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		ra_blocks = adjust_por_ra_blocks(sbi, ra_blocks, blkaddr,
						next_blkaddr_of_node(page));

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}

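/*
 * Entry point for roll-forward recovery.  With @check_only it only reports
 * whether there is anything to recover; otherwise it replays the fsynced
 * data and writes a checkpoint when recovery was actually performed.
 */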
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;

	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
		f2fs_info(sbi, "recover fsync data on readonly fs");

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	f2fs_down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else
		f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	f2fs_up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}

int __init f2fs_create_recovery_cache(void)
{
	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
					sizeof(struct fsync_inode_entry));
	return fsync_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_recovery_cache(void)
{
	kmem_cache_destroy(fsync_entry_slab);
}