xfs: shorten xfs_scrub_ prefix

Shorten all the metadata checking xfs_scrub_ prefixes to xchk_.  After
this, the only xfs_scrub* symbols are the ones that pertain to both
scrub and repair.  Whitespace damage will be fixed in a subsequent
patch.  There are no functional changes.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Author: Darrick J. Wong
Date:   2018-07-19 12:29:11 -07:00
Commit: c517b3aa02
Parent: ef97ef26d2

24 changed files with 1196 additions and 1190 deletions
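For a quick sense of the mechanical pattern, here is one representative before/after pair restated from the first hunk of the diff below; the checking helper loses the long prefix while its argument types are untouched (the shared struct xfs_scrub_context keeps its name, since it is used by both scrub and repair). This is only an illustration of the rename, not new API:

	/* Before this patch: checking helpers carry the long xfs_scrub_ prefix. */
	int xfs_scrub_setup_ag_refcountbt(struct xfs_scrub_context *sc,
					  struct xfs_inode *ip);

	/* After this patch: pure checking helpers use the short xchk_ prefix. */
	int xchk_setup_ag_refcountbt(struct xfs_scrub_context *sc,
				     struct xfs_inode *ip);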


@@ -28,11 +28,11 @@
  * Set us up to scrub reference count btrees.
  */
 int
-xfs_scrub_setup_ag_refcountbt(
+xchk_setup_ag_refcountbt(
 	struct xfs_scrub_context *sc,
 	struct xfs_inode *ip)
 {
-	return xfs_scrub_setup_ag_btree(sc, ip, false);
+	return xchk_setup_ag_btree(sc, ip, false);
 }
 
 /* Reference count btree scrubber. */
@@ -73,12 +73,12 @@ xfs_scrub_setup_ag_refcountbt(
  * If the refcount is correct, all the check conditions in the algorithm
  * should always hold true. If not, the refcount is incorrect.
  */
-struct xfs_scrub_refcnt_frag {
+struct xchk_refcnt_frag {
 	struct list_head list;
 	struct xfs_rmap_irec rm;
 };
 
-struct xfs_scrub_refcnt_check {
+struct xchk_refcnt_check {
 	struct xfs_scrub_context *sc;
 	struct list_head fragments;
@@ -99,18 +99,18 @@ struct xfs_scrub_refcnt_check {
  * fragments as the refcountbt says we should have.
  */
 STATIC int
-xfs_scrub_refcountbt_rmap_check(
+xchk_refcountbt_rmap_check(
 	struct xfs_btree_cur *cur,
 	struct xfs_rmap_irec *rec,
 	void *priv)
 {
-	struct xfs_scrub_refcnt_check *refchk = priv;
-	struct xfs_scrub_refcnt_frag *frag;
+	struct xchk_refcnt_check *refchk = priv;
+	struct xchk_refcnt_frag *frag;
 	xfs_agblock_t rm_last;
 	xfs_agblock_t rc_last;
 	int error = 0;
 
-	if (xfs_scrub_should_terminate(refchk->sc, &error))
+	if (xchk_should_terminate(refchk->sc, &error))
 		return error;
 
 	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
@@ -118,7 +118,7 @@ xfs_scrub_refcountbt_rmap_check(
 	/* Confirm that a single-owner refc extent is a CoW stage. */
 	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
-		xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
+		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
 		return 0;
 	}
@@ -135,7 +135,7 @@ xfs_scrub_refcountbt_rmap_check(
 	 * is healthy each rmap_irec we see will be in agbno order
 	 * so we don't need insertion sort here.
 	 */
-	frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
+	frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
 			KM_MAYFAIL);
 	if (!frag)
 		return -ENOMEM;
@@ -154,12 +154,12 @@ xfs_scrub_refcountbt_rmap_check(
  * we have a refcountbt error.
  */
 STATIC void
-xfs_scrub_refcountbt_process_rmap_fragments(
-	struct xfs_scrub_refcnt_check *refchk)
+xchk_refcountbt_process_rmap_fragments(
+	struct xchk_refcnt_check *refchk)
 {
 	struct list_head worklist;
-	struct xfs_scrub_refcnt_frag *frag;
-	struct xfs_scrub_refcnt_frag *n;
+	struct xchk_refcnt_frag *frag;
+	struct xchk_refcnt_frag *n;
 	xfs_agblock_t bno;
 	xfs_agblock_t rbno;
 	xfs_agblock_t next_rbno;
@@ -277,13 +277,13 @@ done:
 /* Use the rmap entries covering this extent to verify the refcount. */
 STATIC void
-xfs_scrub_refcountbt_xref_rmap(
+xchk_refcountbt_xref_rmap(
 	struct xfs_scrub_context *sc,
 	xfs_agblock_t bno,
 	xfs_extlen_t len,
 	xfs_nlink_t refcount)
 {
-	struct xfs_scrub_refcnt_check refchk = {
+	struct xchk_refcnt_check refchk = {
 		.sc = sc,
 		.bno = bno,
 		.len = len,
@@ -292,11 +292,11 @@ xfs_scrub_refcountbt_xref_rmap(
 	};
 	struct xfs_rmap_irec low;
 	struct xfs_rmap_irec high;
-	struct xfs_scrub_refcnt_frag *frag;
-	struct xfs_scrub_refcnt_frag *n;
+	struct xchk_refcnt_frag *frag;
+	struct xchk_refcnt_frag *n;
 	int error;
 
-	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
 		return;
 
 	/* Cross-reference with the rmapbt to confirm the refcount. */
@@ -307,13 +307,13 @@ xfs_scrub_refcountbt_xref_rmap(
 	INIT_LIST_HEAD(&refchk.fragments);
 	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
-			&xfs_scrub_refcountbt_rmap_check, &refchk);
-	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+			&xchk_refcountbt_rmap_check, &refchk);
+	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 		goto out_free;
 
-	xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
+	xchk_refcountbt_process_rmap_fragments(&refchk);
 	if (refcount != refchk.seen)
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
 
 out_free:
 	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
@@ -324,7 +324,7 @@ out_free:
 /* Cross-reference with the other btrees. */
 STATIC void
-xfs_scrub_refcountbt_xref(
+xchk_refcountbt_xref(
 	struct xfs_scrub_context *sc,
 	xfs_agblock_t agbno,
 	xfs_extlen_t len,
@@ -333,15 +333,15 @@ xfs_scrub_refcountbt_xref(
 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
 		return;
 
-	xfs_scrub_xref_is_used_space(sc, agbno, len);
-	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
-	xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
+	xchk_xref_is_used_space(sc, agbno, len);
+	xchk_xref_is_not_inode_chunk(sc, agbno, len);
+	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
 }
 
 /* Scrub a refcountbt record. */
 STATIC int
-xfs_scrub_refcountbt_rec(
-	struct xfs_scrub_btree *bs,
+xchk_refcountbt_rec(
+	struct xchk_btree *bs,
 	union xfs_btree_rec *rec)
 {
 	struct xfs_mount *mp = bs->cur->bc_mp;
@@ -360,7 +360,7 @@ xfs_scrub_refcountbt_rec(
 	/* Only CoW records can have refcount == 1. */
 	has_cowflag = (bno & XFS_REFC_COW_START);
 	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
-		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
 	if (has_cowflag)
 		(*cow_blocks) += len;
@@ -369,19 +369,19 @@ xfs_scrub_refcountbt_rec(
 	if (bno + len <= bno ||
 	    !xfs_verify_agbno(mp, agno, bno) ||
 	    !xfs_verify_agbno(mp, agno, bno + len - 1))
-		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
 
 	if (refcount == 0)
-		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
 
-	xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);
+	xchk_refcountbt_xref(bs->sc, bno, len, refcount);
 
 	return error;
 }
 
 /* Make sure we have as many refc blocks as the rmap says. */
 STATIC void
-xfs_scrub_refcount_xref_rmap(
+xchk_refcount_xref_rmap(
 	struct xfs_scrub_context *sc,
 	struct xfs_owner_info *oinfo,
 	xfs_filblks_t cow_blocks)
@@ -390,33 +390,33 @@ xfs_scrub_refcount_xref_rmap(
 	xfs_filblks_t blocks;
 	int error;
 
-	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
 		return;
 
 	/* Check that we saw as many refcbt blocks as the rmap knows about. */
 	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
-	if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
+	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
 		return;
-	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
 			&blocks);
-	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 		return;
 	if (blocks != refcbt_blocks)
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
 
 	/* Check that we saw as many cow blocks as the rmap knows about. */
 	xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
-	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
 			&blocks);
-	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 		return;
 	if (blocks != cow_blocks)
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
 }
 
 /* Scrub the refcount btree for some AG. */
 int
-xfs_scrub_refcountbt(
+xchk_refcountbt(
 	struct xfs_scrub_context *sc)
 {
 	struct xfs_owner_info oinfo;
@@ -424,19 +424,19 @@ xfs_scrub_refcountbt(
 	int error;
 
 	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
-	error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
+	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
 			&oinfo, &cow_blocks);
 	if (error)
 		return error;
 
-	xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);
+	xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);
 
 	return 0;
 }
 
 /* xref check that a cow staging extent is marked in the refcountbt. */
 void
-xfs_scrub_xref_is_cow_staging(
+xchk_xref_is_cow_staging(
 	struct xfs_scrub_context *sc,
 	xfs_agblock_t agbno,
 	xfs_extlen_t len)
@@ -446,35 +446,35 @@ xfs_scrub_xref_is_cow_staging(
 	int has_refcount;
 	int error;
 
-	if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
 		return;
 
 	/* Find the CoW staging extent. */
 	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
 			agbno + XFS_REFC_COW_START, &has_refcount);
-	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
 		return;
 	if (!has_refcount) {
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
 		return;
 	}
 
 	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
-	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
 		return;
 	if (!has_refcount) {
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
 		return;
 	}
 
 	/* CoW flag must be set, refcount must be 1. */
 	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
 	if (!has_cowflag || rc.rc_refcount != 1)
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
 
 	/* Must be at least as long as what was passed in */
 	if (rc.rc_blockcount < len)
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
 }
 
 /*
@@ -482,7 +482,7 @@ xfs_scrub_xref_is_cow_staging(
  * can have multiple owners.
  */
 void
-xfs_scrub_xref_is_not_shared(
+xchk_xref_is_not_shared(
 	struct xfs_scrub_context *sc,
 	xfs_agblock_t agbno,
 	xfs_extlen_t len)
@@ -490,12 +490,12 @@ xfs_scrub_xref_is_not_shared(
 	bool shared;
 	int error;
 
-	if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
 		return;
 
 	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
-	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
 		return;
 	if (shared)
-		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
 }