dir.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/dabtree.h"

/* Set us up to scrub directories. */
int
xchk_setup_directory(
	struct xfs_scrub	*sc)
{
	return xchk_setup_inode_contents(sc, 0);
}

/* Directories */

/* Scrub a directory entry. */

struct xchk_dir_ctx {
	/* VFS fill-directory iterator */
	struct dir_context	dir_iter;

	struct xfs_scrub	*sc;
};

/* Check that an inode's mode matches a given DT_ type. */
STATIC int
xchk_dir_check_ftype(
	struct xchk_dir_ctx	*sdc,
	xfs_fileoff_t		offset,
	xfs_ino_t		inum,
	int			dtype)
{
	struct xfs_mount	*mp = sdc->sc->mp;
	struct xfs_inode	*ip;
	int			ino_dtype;
	int			error = 0;

	if (!xfs_has_ftype(mp)) {
		if (dtype != DT_UNKNOWN && dtype != DT_DIR)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
		goto out;
	}

	/*
	 * Grab the inode pointed to by the dirent.  We release the
	 * inode before we cancel the scrub transaction.  Since we
	 * don't know a priori that releasing the inode won't trigger
	 * eofblocks cleanup (which allocates what would be a nested
	 * transaction), we can't use DONTCACHE here because DONTCACHE
	 * inodes can trigger immediate inactive cleanup of the inode.
	 *
	 * If _iget returns -EINVAL or -ENOENT then the child inode number is
	 * garbage and the directory is corrupt.  If _iget returns
	 * -EFSCORRUPTED or -EFSBADCRC then the child is corrupt, which is a
	 * cross-referencing error.  Any other error is an operational error.
	 */
	error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
	if (error == -EINVAL || error == -ENOENT) {
		error = -EFSCORRUPTED;
		xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, 0, &error);
		goto out;
	}
	if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
			&error))
		goto out;

	/* Convert mode to the DT_* values that dir_emit uses. */
	ino_dtype = xfs_dir3_get_dtype(mp,
			xfs_mode_to_ftype(VFS_I(ip)->i_mode));
	if (ino_dtype != dtype)
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
	xfs_irele(ip);
out:
	return error;
}

/*
 * Scrub a single directory entry.
 *
 * We use the VFS directory iterator (i.e. readdir) to call this
 * function for every directory entry in a directory.  Once we're here,
 * we check the inode number to make sure it's sane, then we check that
 * we can look up this filename.  Finally, we check the ftype.
 */
STATIC bool
xchk_dir_actor(
	struct dir_context	*dir_iter,
	const char		*name,
	int			namelen,
	loff_t			pos,
	u64			ino,
	unsigned		type)
{
	struct xfs_mount	*mp;
	struct xfs_inode	*ip;
	struct xchk_dir_ctx	*sdc;
	struct xfs_name		xname;
	xfs_ino_t		lookup_ino;
	xfs_dablk_t		offset;
	bool			checked_ftype = false;
	int			error = 0;

	sdc = container_of(dir_iter, struct xchk_dir_ctx, dir_iter);
	ip = sdc->sc->ip;
	mp = ip->i_mount;
	offset = xfs_dir2_db_to_da(mp->m_dir_geo,
			xfs_dir2_dataptr_to_db(mp->m_dir_geo, pos));
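
	/* Stop the directory iteration if the scrub has been asked to abort. */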
	if (xchk_should_terminate(sdc->sc, &error))
		return !error;

	/* Does this inode number make sense? */
	if (!xfs_verify_dir_ino(mp, ino)) {
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
		goto out;
	}

	/* Does this name make sense? */
	if (!xfs_dir2_namecheck(name, namelen)) {
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
		goto out;
	}

	if (!strncmp(".", name, namelen)) {
		/* If this is "." then check that the inum matches the dir. */
		if (xfs_has_ftype(mp) && type != DT_DIR)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);

		checked_ftype = true;
		if (ino != ip->i_ino)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
	} else if (!strncmp("..", name, namelen)) {
		/*
		 * If this is ".." in the root inode, check that the inum
		 * matches this dir.
		 */
		if (xfs_has_ftype(mp) && type != DT_DIR)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);

		checked_ftype = true;
		if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino)
			xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
					offset);
	}

	/* Verify that we can look up this name by hash. */
	xname.name = name;
	xname.len = namelen;
	xname.type = XFS_DIR3_FT_UNKNOWN;

	error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
	/* ENOENT means the hash lookup failed and the dir is corrupt */
	if (error == -ENOENT)
		error = -EFSCORRUPTED;
	if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
			&error))
		goto out;
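
	/* The hash lookup must resolve to the same inode as the dirent. */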
	if (lookup_ino != ino) {
		xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
		goto out;
	}

	/* Verify the file type.  This function absorbs error codes. */
	if (!checked_ftype) {
		error = xchk_dir_check_ftype(sdc, offset, lookup_ino, type);
		if (error)
			goto out;
	}
out:
	/*
	 * A negative error code returned here is supposed to cause the
	 * dir_emit caller (xfs_readdir) to abort the directory iteration
	 * and return zero to xchk_directory.
	 */
	if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return false;
	return !error;
}

/* Scrub a directory btree record. */
STATIC int
xchk_dir_rec(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_inode		*dp = ds->dargs.dp;
	struct xfs_da_geometry		*geo = mp->m_dir_geo;
	struct xfs_dir2_data_entry	*dent;
	struct xfs_buf			*bp;
	struct xfs_dir2_leaf_entry	*ent;
	unsigned int			end;
	unsigned int			iter_off;
	xfs_ino_t			ino;
	xfs_dablk_t			rec_bno;
	xfs_dir2_db_t			db;
	xfs_dir2_data_aoff_t		off;
	xfs_dir2_dataptr_t		ptr;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	struct xfs_dir3_icleaf_hdr	hdr;
	unsigned int			tag;
	int				error;

	ASSERT(blk->magic == XFS_DIR2_LEAF1_MAGIC ||
	       blk->magic == XFS_DIR2_LEAFN_MAGIC);

	xfs_dir2_leaf_hdr_from_disk(mp, &hdr, blk->bp->b_addr);
	ent = hdr.ents + blk->index;

	/* Check the hash of the entry. */
	error = xchk_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Valid hash pointer? */
	ptr = be32_to_cpu(ent->address);
	if (ptr == 0)
		return 0;

	/* Find the directory entry's location. */
	db = xfs_dir2_dataptr_to_db(geo, ptr);
	off = xfs_dir2_dataptr_to_off(geo, ptr);
	rec_bno = xfs_dir2_db_to_da(geo, db);

	if (rec_bno >= geo->leafblk) {
		xchk_da_set_corrupt(ds, level);
		goto out;
	}
	error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno,
			XFS_DABUF_MAP_HOLE_OK, &bp);
	if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
			&error))
		goto out;
	if (!bp) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out;
	}
	xchk_buffer_recheck(ds->sc, bp);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out_relse;

	dent = bp->b_addr + off;

	/* Make sure we got a real directory entry. */
	iter_off = geo->data_entry_offset;
	end = xfs_dir3_data_end_offset(geo, bp->b_addr);
	if (!end) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out_relse;
	}
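
	/*
	 * Walk the data entries from the start of the block until we find
	 * the entry that the leaf address points at; running off the end
	 * of the block means the address was garbage.
	 */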
	for (;;) {
		struct xfs_dir2_data_entry	*dep = bp->b_addr + iter_off;
		struct xfs_dir2_data_unused	*dup = bp->b_addr + iter_off;

		if (iter_off >= end) {
			xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
			goto out_relse;
		}

		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			iter_off += be16_to_cpu(dup->length);
			continue;
		}
		if (dep == dent)
			break;
		iter_off += xfs_dir2_data_entsize(mp, dep->namelen);
	}

	/* Retrieve the entry, sanity check it, and compare hashes. */
	ino = be64_to_cpu(dent->inumber);
	hash = be32_to_cpu(ent->hashval);
	tag = be16_to_cpup(xfs_dir2_data_entry_tag_p(mp, dent));
	if (!xfs_verify_dir_ino(mp, ino) || tag != off)
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
	if (dent->namelen == 0) {
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
		goto out_relse;
	}
	calc_hash = xfs_da_hashname(dent->name, dent->namelen);
	if (calc_hash != hash)
		xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);

out_relse:
	xfs_trans_brelse(ds->dargs.trans, bp);
out:
	return error;
}

/*
 * Is this unused entry either in the bestfree or smaller than all of
 * them?  We've already checked that the bestfrees are sorted longest to
 * shortest, and that there aren't any bogus entries.
 */
STATIC void
xchk_directory_check_free_entry(
	struct xfs_scrub		*sc,
	xfs_dablk_t			lblk,
	struct xfs_dir2_data_free	*bf,
	struct xfs_dir2_data_unused	*dup)
{
	struct xfs_dir2_data_free	*dfp;
	unsigned int			dup_length;

	dup_length = be16_to_cpu(dup->length);

	/* Unused entry is shorter than any of the bestfrees */
	if (dup_length < be16_to_cpu(bf[XFS_DIR2_DATA_FD_COUNT - 1].length))
		return;

	for (dfp = &bf[XFS_DIR2_DATA_FD_COUNT - 1]; dfp >= bf; dfp--)
		if (dup_length == be16_to_cpu(dfp->length))
			return;

	/* Unused entry should be in the bestfrees but wasn't found. */
	xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}

/* Check free space info in a directory data block. */
STATIC int
xchk_directory_data_bestfree(
	struct xfs_scrub		*sc,
	xfs_dablk_t			lblk,
	bool				is_block)
{
	struct xfs_dir2_data_unused	*dup;
	struct xfs_dir2_data_free	*dfp;
	struct xfs_buf			*bp;
	struct xfs_dir2_data_free	*bf;
	struct xfs_mount		*mp = sc->mp;
	u16				tag;
	unsigned int			nr_bestfrees = 0;
	unsigned int			nr_frees = 0;
	unsigned int			smallest_bestfree;
	int				newlen;
	unsigned int			offset;
	unsigned int			end;
	int				error;

	if (is_block) {
		/* dir block format */
		if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
	} else {
		/* dir data format */
		error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, 0, &bp);
	}
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
		goto out;
	xchk_buffer_recheck(sc, bp);

	/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out_buf;

	/* Do the bestfrees correspond to actual free space? */
	bf = xfs_dir2_data_bestfree_p(mp, bp->b_addr);
	smallest_bestfree = UINT_MAX;
	for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) {
		offset = be16_to_cpu(dfp->offset);
		if (offset == 0)
			continue;
		if (offset >= mp->m_dir_geo->blksize) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}
		dup = bp->b_addr + offset;
		tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));

		/* bestfree doesn't match the entry it points at? */
		if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
		    be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
		    tag != offset) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}

		/* bestfree records should be ordered largest to smallest */
		if (smallest_bestfree < be16_to_cpu(dfp->length)) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}

		smallest_bestfree = be16_to_cpu(dfp->length);
		nr_bestfrees++;
	}

	/* Make sure the bestfrees are actually the best free spaces. */
	offset = mp->m_dir_geo->data_entry_offset;
	end = xfs_dir3_data_end_offset(mp->m_dir_geo, bp->b_addr);

	/* Iterate the entries, stopping when we hit or go past the end. */
	while (offset < end) {
		dup = bp->b_addr + offset;

		/* Skip real entries */
		if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG)) {
			struct xfs_dir2_data_entry *dep = bp->b_addr + offset;

			newlen = xfs_dir2_data_entsize(mp, dep->namelen);
			if (newlen <= 0) {
				xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
						lblk);
				goto out_buf;
			}
			offset += newlen;
			continue;
		}

		/* Spot check this free entry */
		tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
		if (tag != offset) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}

		/*
		 * Either this entry is a bestfree or it's smaller than
		 * any of the bestfrees.
		 */
		xchk_directory_check_free_entry(sc, lblk, bf, dup);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out_buf;

		/* Move on. */
		newlen = be16_to_cpu(dup->length);
		if (newlen <= 0) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out_buf;
		}
		offset += newlen;
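		/* Count this free entry only if it doesn't run past the end. */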
		if (offset <= end)
			nr_frees++;
	}

	/* We're required to fill all the space. */
	if (offset != end)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);

	/* Did we see at least as many free slots as there are bestfrees? */
	if (nr_frees < nr_bestfrees)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);

out_buf:
	xfs_trans_brelse(sc->tp, bp);
out:
	return error;
}

/*
 * Does the free space length in the free space index block ($len) match
 * the longest length in the directory data block's bestfree array?
 * Assume that we've already checked that the data block's bestfree
 * array is in order.
 */
STATIC void
xchk_directory_check_freesp(
	struct xfs_scrub		*sc,
	xfs_dablk_t			lblk,
	struct xfs_buf			*dbp,
	unsigned int			len)
{
	struct xfs_dir2_data_free	*dfp;

	dfp = xfs_dir2_data_bestfree_p(sc->mp, dbp->b_addr);

	if (len != be16_to_cpu(dfp->length))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
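
	/* A nonzero free space length must come with a nonzero offset. */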
	if (len > 0 && be16_to_cpu(dfp->offset) == 0)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}

/* Check free space info in a directory leaf1 block. */
STATIC int
xchk_directory_leaf1_bestfree(
	struct xfs_scrub		*sc,
	struct xfs_da_args		*args,
	xfs_dir2_db_t			last_data_db,
	xfs_dablk_t			lblk)
{
	struct xfs_dir3_icleaf_hdr	leafhdr;
	struct xfs_dir2_leaf_tail	*ltp;
	struct xfs_dir2_leaf		*leaf;
	struct xfs_buf			*dbp;
	struct xfs_buf			*bp;
	struct xfs_da_geometry		*geo = sc->mp->m_dir_geo;
	__be16				*bestp;
	__u16				best;
	__u32				hash;
	__u32				lasthash = 0;
	__u32				bestcount;
	unsigned int			stale = 0;
	int				i;
	int				error;

	/* Read the free space block. */
	error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
		return error;
	xchk_buffer_recheck(sc, bp);

	leaf = bp->b_addr;
	xfs_dir2_leaf_hdr_from_disk(sc->ip->i_mount, &leafhdr, leaf);
	ltp = xfs_dir2_leaf_tail_p(geo, leaf);
	bestcount = be32_to_cpu(ltp->bestcount);
	bestp = xfs_dir2_leaf_bests_p(ltp);

	if (xfs_has_crc(sc->mp)) {
		struct xfs_dir3_leaf_hdr	*hdr3 = bp->b_addr;

		if (hdr3->pad != cpu_to_be32(0))
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
	}

	/*
	 * There must be enough bestfree slots to cover all the directory data
	 * blocks that we scanned.  It is possible for there to be a hole
	 * between the last data block and i_disk_size.  This seems like an
	 * oversight to the scrub author, but as we have been writing out
	 * directories like this (and xfs_repair doesn't mind them) for years,
	 * that's what we have to check.
	 */
	if (bestcount != last_data_db + 1) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		goto out;
	}

	/* Is the leaf count even remotely sane? */
	if (leafhdr.count > geo->leaf_max_ents) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		goto out;
	}

	/* Leaves and bests don't overlap in leaf format. */
	if ((char *)&leafhdr.ents[leafhdr.count] > (char *)bestp) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		goto out;
	}

	/* Check hash value order, count stale entries. */
	for (i = 0; i < leafhdr.count; i++) {
		hash = be32_to_cpu(leafhdr.ents[i].hashval);
		if (i > 0 && lasthash > hash)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		lasthash = hash;
		if (leafhdr.ents[i].address ==
		    cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
			stale++;
	}
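
	/* The stale count in the header must match what we just counted. */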
	if (leafhdr.stale != stale)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check all the bestfree entries. */
	for (i = 0; i < bestcount; i++, bestp++) {
		best = be16_to_cpu(*bestp);
		error = xfs_dir3_data_read(sc->tp, sc->ip,
				xfs_dir2_db_to_da(args->geo, i),
				XFS_DABUF_MAP_HOLE_OK, &dbp);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
				&error))
			break;

		if (!dbp) {
			if (best != NULLDATAOFF) {
				xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
						lblk);
				break;
			}
			continue;
		}

		if (best == NULLDATAOFF)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
		else
			xchk_directory_check_freesp(sc, lblk, dbp, best);
		xfs_trans_brelse(sc->tp, dbp);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}
out:
	xfs_trans_brelse(sc->tp, bp);
	return error;
}

/* Check free space info in a directory freespace block. */
STATIC int
xchk_directory_free_bestfree(
	struct xfs_scrub		*sc,
	struct xfs_da_args		*args,
	xfs_dablk_t			lblk)
{
	struct xfs_dir3_icfree_hdr	freehdr;
	struct xfs_buf			*dbp;
	struct xfs_buf			*bp;
	__u16				best;
	unsigned int			stale = 0;
	int				i;
	int				error;

	/* Read the free space block */
	error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
		return error;
	xchk_buffer_recheck(sc, bp);

	if (xfs_has_crc(sc->mp)) {
		struct xfs_dir3_free_hdr	*hdr3 = bp->b_addr;

		if (hdr3->pad != cpu_to_be32(0))
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
	}

	/* Check all the entries. */
	xfs_dir2_free_hdr_from_disk(sc->ip->i_mount, &freehdr, bp->b_addr);
	for (i = 0; i < freehdr.nvalid; i++) {
		best = be16_to_cpu(freehdr.bests[i]);
		if (best == NULLDATAOFF) {
			stale++;
			continue;
		}
		error = xfs_dir3_data_read(sc->tp, sc->ip,
				(freehdr.firstdb + i) * args->geo->fsbcount,
				0, &dbp);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
				&error))
			goto out;
		xchk_directory_check_freesp(sc, lblk, dbp, best);
		xfs_trans_brelse(sc->tp, dbp);
	}
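
	/* In-use and stale entries must account for every valid slot. */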
	if (freehdr.nused + stale != freehdr.nvalid)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out:
	xfs_trans_brelse(sc->tp, bp);
	return error;
}

/* Check free space information in directories. */
STATIC int
xchk_directory_blocks(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	got;
	struct xfs_da_args	args;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, XFS_DATA_FORK);
	struct xfs_mount	*mp = sc->mp;
	xfs_fileoff_t		leaf_lblk;
	xfs_fileoff_t		free_lblk;
	xfs_fileoff_t		lblk;
	struct xfs_iext_cursor	icur;
	xfs_dablk_t		dabno;
	xfs_dir2_db_t		last_data_db = 0;
	bool			found;
	bool			is_block = false;
	int			error;

	/* Ignore local format directories. */
	if (ifp->if_format != XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_format != XFS_DINODE_FMT_BTREE)
		return 0;

	lblk = XFS_B_TO_FSB(mp, XFS_DIR2_DATA_OFFSET);
	leaf_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_LEAF_OFFSET);
	free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET);

	/* Is this a block dir? */
	args.dp = sc->ip;
	args.geo = mp->m_dir_geo;
	args.trans = sc->tp;
	error = xfs_dir2_isblock(&args, &is_block);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
		goto out;

	/* Iterate all the data extents in the directory... */
	found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
	while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
		/* No more data blocks... */
		if (got.br_startoff >= leaf_lblk)
			break;

		/*
		 * Check each data block's bestfree data.
		 *
		 * Iterate all the fsbcount-aligned block offsets in
		 * this directory.  The directory block reading code is
		 * smart enough to do its own bmap lookups to handle
		 * discontiguous directory blocks.  When we're done
		 * with the extent record, re-query the bmap at the
		 * next fsbcount-aligned offset to avoid redundant
		 * block checks.
		 */
		for (lblk = roundup((xfs_dablk_t)got.br_startoff,
				args.geo->fsbcount);
		     lblk < got.br_startoff + got.br_blockcount;
		     lblk += args.geo->fsbcount) {
			last_data_db = xfs_dir2_da_to_db(args.geo, lblk);
			error = xchk_directory_data_bestfree(sc, lblk,
					is_block);
			if (error)
				goto out;
		}
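
		/* Advance past this extent to the next aligned offset. */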
		dabno = got.br_startoff + got.br_blockcount;
		lblk = roundup(dabno, args.geo->fsbcount);
		found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Look for a leaf1 block, which has free info. */
	if (xfs_iext_lookup_extent(sc->ip, ifp, leaf_lblk, &icur, &got) &&
	    got.br_startoff == leaf_lblk &&
	    got.br_blockcount == args.geo->fsbcount &&
	    !xfs_iext_next_extent(ifp, &icur, &got)) {
		if (is_block) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out;
		}
		error = xchk_directory_leaf1_bestfree(sc, &args, last_data_db,
				leaf_lblk);
		if (error)
			goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Scan for free blocks */
	lblk = free_lblk;
	found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
	while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
		/*
		 * Dirs can't have blocks mapped above 2^32.
		 * Single-block dirs shouldn't even be here.
		 */
		lblk = got.br_startoff;
		if (lblk & ~0xFFFFFFFFULL) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out;
		}
		if (is_block) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
			goto out;
		}

		/*
		 * Check each dir free block's bestfree data.
		 *
		 * Iterate all the fsbcount-aligned block offsets in
		 * this directory.  The directory block reading code is
		 * smart enough to do its own bmap lookups to handle
		 * discontiguous directory blocks.  When we're done
		 * with the extent record, re-query the bmap at the
		 * next fsbcount-aligned offset to avoid redundant
		 * block checks.
		 */
		for (lblk = roundup((xfs_dablk_t)got.br_startoff,
				args.geo->fsbcount);
		     lblk < got.br_startoff + got.br_blockcount;
		     lblk += args.geo->fsbcount) {
			error = xchk_directory_free_bestfree(sc, &args,
					lblk);
			if (error)
				goto out;
		}
		dabno = got.br_startoff + got.br_blockcount;
		lblk = roundup(dabno, args.geo->fsbcount);
		found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
	}
out:
	return error;
}

/* Scrub a whole directory. */
int
xchk_directory(
	struct xfs_scrub	*sc)
{
	struct xchk_dir_ctx	sdc = {
		.dir_iter.actor = xchk_dir_actor,
		.dir_iter.pos = 0,
		.sc = sc,
	};
	size_t			bufsize;
	loff_t			oldpos;
	int			error = 0;

	if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
		return -ENOENT;

	/* Plausible size? */
	if (sc->ip->i_disk_size < xfs_dir2_sf_hdr_size(0)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		goto out;
	}

	/* Check directory tree structure */
	error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_dir_rec, NULL);
	if (error)
		return error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return error;

	/* Check the freespace. */
	error = xchk_directory_blocks(sc);
	if (error)
		return error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return error;

	/*
	 * Check that every dirent we see can also be looked up by hash.
	 * Userspace usually asks for a 32k buffer, so we will too.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
			sc->ip->i_disk_size);

	/*
	 * Look up every name in this directory by hash.
	 *
	 * Use the xfs_readdir function to call xchk_dir_actor on
	 * every directory entry in this directory.  In _actor, we check
	 * the name, inode number, and ftype (if applicable) of the
	 * entry.  xfs_readdir uses the VFS filldir functions to provide
	 * iteration context.
	 *
	 * The VFS grabs a read or write lock via i_rwsem before it reads
	 * or writes to a directory.  If we've gotten this far we've
	 * already obtained IOLOCK_EXCL, which (since 4.10) is the same as
	 * getting a write lock on i_rwsem.  Therefore, it is safe for us
	 * to drop the ILOCK here in order to reuse the _readdir and
	 * _dir_lookup routines, which do their own ILOCK locking.
	 */
	oldpos = 0;
	sc->ilock_flags &= ~XFS_ILOCK_EXCL;
	xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
	while (true) {
		error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			goto out;
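
		/* Stop when readdir no longer advances the cursor. */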
		if (oldpos == sdc.dir_iter.pos)
			break;
		oldpos = sdc.dir_iter.pos;
	}

out:
	return error;
}