// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if (((irec->ir_freecount > 0 && !has_irec) ||
	     (irec->ir_freecount == 0 && has_irec)))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
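
/*
 * To illustrate the invariant checked above: an inobt record with
 * ir_freecount == 2 covers free inodes, so the finobt must contain a
 * record for the same chunk; a record with ir_freecount == 0 must not
 * have a finobt counterpart.  Either mismatch marks the cross-referenced
 * btree corrupt.
 */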

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_perag		*pag = bs->cur->bc_ag.pag;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);

	if (!xfs_verify_agbext(pag, bno, len))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}
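
/*
 * Sketch of the conversion above, with assumed geometry: on a filesystem
 * with 512-byte inodes and 4096-byte blocks (eight inodes per block),
 * XFS_AGINO_TO_AGBNO() shifts the AG inode number right by sb_inopblog,
 * so agino 160 maps to agbno 160 >> 3 == 20.
 */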

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
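
/*
 * hweight64() is a 64-bit population count, and each set bit in ir_free
 * marks one free inode in the chunk.  For instance, a freemask of 0x3
 * (bits 0 and 1 set) yields a free count of 2.
 */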

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
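
/*
 * A quick truth table for the freemask check above: the inobt's idea of
 * "free" and the inode's in-use state must disagree.  If the record says
 * the inode is free (irec_free set) and the inode is not in use (di_mode
 * == 0 on disk, or ino_inuse false in core), then free ^ in-use == 1 and
 * freemask_ok holds; free-and-in-use or allocated-and-unused both XOR to
 * 0 and flag corruption.
 */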

/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			M_IGEO(mp)->inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				M_IGEO(mp)->blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}
		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}
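
/*
 * Worked example for the cluster mask, under assumed geometry: with
 * XFS_INODES_PER_CHUNK == 64 and XFS_INOBT_HOLEMASK_BITS == 16, each
 * holemask bit covers XFS_INODES_PER_HOLEMASK_BIT == 4 inodes.  If
 * inodes_per_cluster were 8, the cluster at cluster_base == 8 would span
 * inodes 8-15 of the record, so the loop above sets holemask bits
 * 8/4 == 2 and 12/4 == 3, i.e. cluster_mask == 0xc.  A record holemask
 * of 0xc for that cluster is a hole, 0x0 is fully mapped, and anything
 * in between (say 0x4) trips the all-or-nothing check.
 */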

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}
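
/*
 * To make the two cases concrete (figures assumed for illustration): if
 * inodes_per_cluster == 16, one 64-inode record spans four clusters and
 * the loop runs with cluster_base == 0, 16, 32, 48.  If inodes_per_cluster
 * == 128, the loop body runs once with cluster_base == 0, and the cluster
 * checker copes with records that start partway into the cluster buffer.
 */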

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				igeo->cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inoalignment size. */
	if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}
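
/*
 * Example of the multi-record tracking, assuming inodes_per_cluster == 128
 * (a cluster holding two 64-inode chunks): a record at startino 256 sets
 * next_startino = 320 and next_cluster_ino = 384, so the very next record
 * must start at agino 320.  Once that record is seen, next_startino
 * advances to 384, reaching next_cluster_ino, and both fields reset to
 * NULLAGINO.
 */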

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_perag		*pag = bs->cur->bc_ag.pag;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(pag, agino) ||
	    !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}
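
/*
 * Example of the freecount sanity check above, with assumed numbers: a
 * sparse record with ir_count == 48 and ir_freecount == 10 implies
 * real_freecount == 10 + (64 - 48) == 26, because the 16 inodes excluded
 * by the holemask must also have their ir_free bits set (that is what the
 * "holes & ir_free" test enforces).  The record is consistent only if
 * popcount(ir_free) == 26 as well.
 */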

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
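
/*
 * For example (sizes assumed): 6400 scanned inodes of 512 bytes each
 * occupy 3,276,800 bytes, which XFS_B_TO_FSB() converts to 800 fsblocks
 * on a 4096-byte-block filesystem.  The rmap's count of blocks owned by
 * XFS_RMAP_OINFO_INODES must come out to exactly the same figure.
 */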

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}