xfs: separate inode geometry

Separate the inode geometry information into a distinct structure.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Author: Darrick J. Wong
Date:   2019-06-05 11:19:34 -07:00
parent  fe0da9c09b
commit  ef32595999
18 changed files with 208 additions and 161 deletions
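
For orientation: every hunk below converts a per-mount field (mp->m_ialloc_blks, mp->m_inobt_mxr, ...) into a member of the new struct xfs_ino_geometry, reached through an M_IGEO() accessor. The accessor's own definition is not among the hunks shown here; the following is a minimal sketch of the pattern implied by the (mp)->m_ino_geo.agino_log usage in the first hunk, not the patch's verbatim code.

/*
 * Sketch only: the geometry appears to be embedded in the per-mount
 * structure and reached through a trivial accessor macro.
 */
struct xfs_mount {
	/* ... existing per-mount fields ... */
	struct xfs_ino_geometry	m_ino_geo;	/* inode allocation geometry */
	/* ... */
};

#define M_IGEO(mp)	(&(mp)->m_ino_geo)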

View File

@@ -1071,7 +1071,7 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
#define XFS_INO_MASK(k) (uint32_t)((1ULL << (k)) - 1)
#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
#define XFS_INO_AGINO_BITS(mp) ((mp)->m_ino_geo.agino_log)
#define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log
#define XFS_INO_BITS(mp) \
XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)
@@ -1694,4 +1694,40 @@ struct xfs_acl {
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
struct xfs_ino_geometry {
/* Maximum inode count in this filesystem. */
uint64_t maxicount;
/*
* Desired inode cluster buffer size, in bytes. This value is not
* rounded up to at least one filesystem block.
*/
unsigned int inode_cluster_size;
/* Inode cluster sizes, adjusted to be at least 1 fsb. */
unsigned int inodes_per_cluster;
unsigned int blocks_per_cluster;
/* Inode cluster alignment. */
unsigned int cluster_align;
unsigned int cluster_align_inodes;
unsigned int inoalign_mask; /* mask sb_inoalignmt if used */
unsigned int inobt_mxr[2]; /* max inobt btree records */
unsigned int inobt_mnr[2]; /* min inobt btree records */
unsigned int inobt_maxlevels; /* max inobt btree levels. */
/* Size of inode allocations under normal operation. */
unsigned int ialloc_inos;
unsigned int ialloc_blks;
/* Minimum inode blocks for a sparse allocation. */
unsigned int ialloc_min_blks;
/* stripe unit inode alignment */
unsigned int ialloc_align;
unsigned int agino_log; /* #bits for agino in inum */
};
#endif /* __XFS_FORMAT_H__ */
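
The structure above only declares the rounded-up cluster fields; the hunks shown here do not populate them. As an illustration of how blocks_per_cluster and inodes_per_cluster relate to inode_cluster_size, here is a hedged sketch (hypothetical helper name) that mirrors the xfs_icluster_size_fsb() rounding visible in a later hunk:

/*
 * Illustration only: derive the per-cluster fields from the raw cluster
 * buffer size, rounding up to at least one filesystem block.
 */
static void
example_fill_cluster_geometry(struct xfs_mount *mp)
{
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);

	if (mp->m_sb.sb_blocksize >= igeo->inode_cluster_size)
		igeo->blocks_per_cluster = 1;
	else
		igeo->blocks_per_cluster =
			igeo->inode_cluster_size >> mp->m_sb.sb_blocklog;

	/* Each filesystem block holds sb_inopblock inodes. */
	igeo->inodes_per_cluster =
		igeo->blocks_per_cluster * mp->m_sb.sb_inopblock;
}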

View File

@@ -299,7 +299,7 @@ xfs_ialloc_inode_init(
* sizes, manipulate the inodes in buffers which are multiples of the
* blocks size.
*/
nbufs = length / mp->m_blocks_per_cluster;
nbufs = length / M_IGEO(mp)->blocks_per_cluster;
/*
* Figure out what version number to use in the inodes we create. If
@@ -343,9 +343,10 @@ xfs_ialloc_inode_init(
* Get the block.
*/
d = XFS_AGB_TO_DADDR(mp, agno, agbno +
(j * mp->m_blocks_per_cluster));
(j * M_IGEO(mp)->blocks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
mp->m_bsize * mp->m_blocks_per_cluster,
mp->m_bsize *
M_IGEO(mp)->blocks_per_cluster,
XBF_UNMAPPED);
if (!fbuf)
return -ENOMEM;
@@ -353,7 +354,7 @@ xfs_ialloc_inode_init(
/* Initialize the inode buffers and log them appropriately. */
fbuf->b_ops = &xfs_inode_buf_ops;
xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
for (i = 0; i < mp->m_inodes_per_cluster; i++) {
for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
uint isize = xfs_dinode_size(version);
@@ -616,24 +617,26 @@ error:
* Allocate new inodes in the allocation group specified by agbp.
* Return 0 for success, else error code.
*/
STATIC int /* error code or 0 */
STATIC int
xfs_ialloc_ag_alloc(
xfs_trans_t *tp, /* transaction pointer */
xfs_buf_t *agbp, /* alloc group buffer */
int *alloc)
struct xfs_trans *tp,
struct xfs_buf *agbp,
int *alloc)
{
xfs_agi_t *agi; /* allocation group header */
xfs_alloc_arg_t args; /* allocation argument structure */
xfs_agnumber_t agno;
int error;
xfs_agino_t newino; /* new first inode's number */
xfs_agino_t newlen; /* new number of inodes */
int isaligned = 0; /* inode allocation at stripe unit */
/* boundary */
uint16_t allocmask = (uint16_t) -1; /* init. to full chunk */
struct xfs_agi *agi;
struct xfs_alloc_arg args;
xfs_agnumber_t agno;
int error;
xfs_agino_t newino; /* new first inode's number */
xfs_agino_t newlen; /* new number of inodes */
int isaligned = 0; /* inode allocation at stripe */
/* unit boundary */
/* init. to full chunk */
uint16_t allocmask = (uint16_t) -1;
struct xfs_inobt_rec_incore rec;
struct xfs_perag *pag;
int do_sparse = 0;
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
int do_sparse = 0;
memset(&args, 0, sizeof(args));
args.tp = tp;
@@ -644,7 +647,7 @@ xfs_ialloc_ag_alloc(
#ifdef DEBUG
/* randomly do sparse inode allocations */
if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks)
igeo->ialloc_min_blks < igeo->ialloc_blks)
do_sparse = prandom_u32() & 1;
#endif
@@ -652,12 +655,12 @@ xfs_ialloc_ag_alloc(
* Locking will ensure that we don't have two callers in here
* at one time.
*/
newlen = args.mp->m_ialloc_inos;
if (args.mp->m_maxicount &&
newlen = igeo->ialloc_inos;
if (igeo->maxicount &&
percpu_counter_read_positive(&args.mp->m_icount) + newlen >
args.mp->m_maxicount)
igeo->maxicount)
return -ENOSPC;
args.minlen = args.maxlen = args.mp->m_ialloc_blks;
args.minlen = args.maxlen = igeo->ialloc_blks;
/*
* First try to allocate inodes contiguous with the last-allocated
* chunk of inodes. If the filesystem is striped, this will fill
@@ -667,7 +670,7 @@ xfs_ialloc_ag_alloc(
newino = be32_to_cpu(agi->agi_newino);
agno = be32_to_cpu(agi->agi_seqno);
args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
args.mp->m_ialloc_blks;
igeo->ialloc_blks;
if (do_sparse)
goto sparse_alloc;
if (likely(newino != NULLAGINO &&
@@ -690,10 +693,10 @@ xfs_ialloc_ag_alloc(
* but not to use them in the actual exact allocation.
*/
args.alignment = 1;
args.minalignslop = args.mp->m_cluster_align - 1;
args.minalignslop = igeo->cluster_align - 1;
/* Allow space for the inode btree to split. */
args.minleft = args.mp->m_in_maxlevels - 1;
args.minleft = igeo->inobt_maxlevels - 1;
if ((error = xfs_alloc_vextent(&args)))
return error;
@@ -720,12 +723,12 @@ xfs_ialloc_ag_alloc(
* pieces, so don't need alignment anyway.
*/
isaligned = 0;
if (args.mp->m_sinoalign) {
if (igeo->ialloc_align) {
ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
args.alignment = args.mp->m_dalign;
isaligned = 1;
} else
args.alignment = args.mp->m_cluster_align;
args.alignment = igeo->cluster_align;
/*
* Need to figure out where to allocate the inode blocks.
* Ideally they should be spaced out through the a.g.
@@ -741,7 +744,7 @@ xfs_ialloc_ag_alloc(
/*
* Allow space for the inode btree to split.
*/
args.minleft = args.mp->m_in_maxlevels - 1;
args.minleft = igeo->inobt_maxlevels - 1;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
@@ -754,7 +757,7 @@ xfs_ialloc_ag_alloc(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
args.alignment = args.mp->m_cluster_align;
args.alignment = igeo->cluster_align;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
@@ -764,7 +767,7 @@ xfs_ialloc_ag_alloc(
* the sparse allocation length is smaller than a full chunk.
*/
if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
args.mp->m_ialloc_min_blks < args.mp->m_ialloc_blks &&
igeo->ialloc_min_blks < igeo->ialloc_blks &&
args.fsbno == NULLFSBLOCK) {
sparse_alloc:
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -773,7 +776,7 @@ sparse_alloc:
args.alignment = args.mp->m_sb.sb_spino_align;
args.prod = 1;
args.minlen = args.mp->m_ialloc_min_blks;
args.minlen = igeo->ialloc_min_blks;
args.maxlen = args.minlen;
/*
@@ -789,7 +792,7 @@ sparse_alloc:
args.min_agbno = args.mp->m_sb.sb_inoalignmt;
args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
args.mp->m_sb.sb_inoalignmt) -
args.mp->m_ialloc_blks;
igeo->ialloc_blks;
error = xfs_alloc_vextent(&args);
if (error)
@@ -1006,7 +1009,7 @@ xfs_ialloc_ag_select(
* space needed for alignment of inode chunks when checking the
* longest contiguous free space in the AG - this prevents us
* from getting ENOSPC because we have free space larger than
* m_ialloc_blks but alignment constraints prevent us from using
* ialloc_blks but alignment constraints prevent us from using
* it.
*
* If we can't find an AG with space for full alignment slack to
@@ -1015,9 +1018,9 @@ xfs_ialloc_ag_select(
* if we fail allocation due to alignment issues then it is most
* likely a real ENOSPC condition.
*/
ineed = mp->m_ialloc_min_blks;
ineed = M_IGEO(mp)->ialloc_min_blks;
if (flags && ineed > 1)
ineed += mp->m_cluster_align;
ineed += M_IGEO(mp)->cluster_align;
longest = pag->pagf_longest;
if (!longest)
longest = pag->pagf_flcount > 0;
@@ -1703,6 +1706,7 @@ xfs_dialloc(
int noroom = 0;
xfs_agnumber_t start_agno;
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
int okalloc = 1;
if (*IO_agbp) {
@@ -1733,9 +1737,9 @@ xfs_dialloc(
* Read rough value of mp->m_icount by percpu_counter_read_positive,
* which will sacrifice the preciseness but improve the performance.
*/
if (mp->m_maxicount &&
percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos
> mp->m_maxicount) {
if (igeo->maxicount &&
percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
> igeo->maxicount) {
noroom = 1;
okalloc = 0;
}
@@ -1852,7 +1856,8 @@ xfs_difree_inode_chunk(
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
mp->m_ialloc_blks, &XFS_RMAP_OINFO_INODES);
M_IGEO(mp)->ialloc_blks,
&XFS_RMAP_OINFO_INODES);
return;
}
@@ -2261,7 +2266,7 @@ xfs_imap_lookup(
/* check that the returned record contains the required inode */
if (rec.ir_startino > agino ||
rec.ir_startino + mp->m_ialloc_inos <= agino)
rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
return -EINVAL;
/* for untrusted inodes check it is allocated first */
@@ -2352,7 +2357,7 @@ xfs_imap(
* If the inode cluster size is the same as the blocksize or
* smaller we get to the buffer by simple arithmetics.
*/
if (mp->m_blocks_per_cluster == 1) {
if (M_IGEO(mp)->blocks_per_cluster == 1) {
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
@@ -2368,8 +2373,8 @@ xfs_imap(
* find the location. Otherwise we have to do a btree
* lookup to find the location.
*/
if (mp->m_inoalign_mask) {
offset_agbno = agbno & mp->m_inoalign_mask;
if (M_IGEO(mp)->inoalign_mask) {
offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
@@ -2381,13 +2386,13 @@ xfs_imap(
out_map:
ASSERT(agbno >= chunk_agbno);
cluster_agbno = chunk_agbno +
((offset_agbno / mp->m_blocks_per_cluster) *
mp->m_blocks_per_cluster);
((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
M_IGEO(mp)->blocks_per_cluster);
offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
XFS_INO_TO_OFFSET(mp, ino);
imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
imap->im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
/*
@@ -2409,7 +2414,7 @@ out_map:
}
/*
* Compute and fill in value of m_in_maxlevels.
* Compute and fill in value of m_ino_geo.inobt_maxlevels.
*/
void
xfs_ialloc_compute_maxlevels(
@@ -2418,8 +2423,8 @@ xfs_ialloc_compute_maxlevels(
uint inodes;
inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp->m_inobt_mnr,
inodes);
M_IGEO(mp)->inobt_maxlevels = xfs_btree_compute_maxlevels(
M_IGEO(mp)->inobt_mnr, inodes);
}
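
xfs_btree_compute_maxlevels() itself is not part of this patch. Conceptually it divides the worst-case record count by the minimum per-block fanout until a single block suffices; a hedged sketch of that idea (hypothetical function name, not the kernel's verbatim code):

/*
 * Sketch: number of btree levels needed to index nrecs records when a
 * leaf block holds at least mnr[0] records and a node block at least
 * mnr[1] keys.
 */
static unsigned int
example_compute_maxlevels(const unsigned int mnr[2], unsigned long long nrecs)
{
	unsigned long long	blocks = (nrecs + mnr[0] - 1) / mnr[0];
	unsigned int		level = 1;

	while (blocks > 1) {
		blocks = (blocks + mnr[1] - 1) / mnr[1];
		level++;
	}
	return level;
}

Here nrecs is the AG's inode address space divided into 64-inode chunks (XFS_INODES_PER_CHUNK), as computed by xfs_ialloc_compute_maxlevels() above.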
/*

View File

@@ -28,9 +28,9 @@ static inline int
xfs_icluster_size_fsb(
struct xfs_mount *mp)
{
if (mp->m_sb.sb_blocksize >= mp->m_inode_cluster_size)
if (mp->m_sb.sb_blocksize >= M_IGEO(mp)->inode_cluster_size)
return 1;
return mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
return M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_blocklog;
}
/*
@@ -96,7 +96,7 @@ xfs_imap(
uint flags); /* flags for inode btree lookup */
/*
* Compute and fill in value of m_in_maxlevels.
* Compute and fill in value of m_ino_geo.inobt_maxlevels.
*/
void
xfs_ialloc_compute_maxlevels(

View File

@@ -28,7 +28,7 @@ xfs_inobt_get_minrecs(
struct xfs_btree_cur *cur,
int level)
{
return cur->bc_mp->m_inobt_mnr[level != 0];
return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}
STATIC struct xfs_btree_cur *
@@ -164,7 +164,7 @@ xfs_inobt_get_maxrecs(
struct xfs_btree_cur *cur,
int level)
{
return cur->bc_mp->m_inobt_mxr[level != 0];
return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}
STATIC void
@@ -281,10 +281,11 @@ xfs_inobt_verify(
/* level verification */
level = be16_to_cpu(block->bb_level);
if (level >= mp->m_in_maxlevels)
if (level >= M_IGEO(mp)->inobt_maxlevels)
return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
return xfs_btree_sblock_verify(bp,
M_IGEO(mp)->inobt_mxr[level != 0]);
}
static void
@@ -546,7 +547,7 @@ xfs_inobt_max_size(
xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno);
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (mp->m_inobt_mxr[0] == 0)
if (M_IGEO(mp)->inobt_mxr[0] == 0)
return 0;
/*
@@ -558,7 +559,7 @@ xfs_inobt_max_size(
XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
agblocks -= mp->m_sb.sb_logblocks;
return xfs_btree_calc_size(mp->m_inobt_mnr,
return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
(uint64_t)agblocks * mp->m_sb.sb_inopblock /
XFS_INODES_PER_CHUNK);
}
@@ -619,5 +620,5 @@ xfs_iallocbt_calc_size(
struct xfs_mount *mp,
unsigned long long len)
{
return xfs_btree_calc_size(mp->m_inobt_mnr, len);
return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}

View File

@@ -36,7 +36,7 @@ xfs_inobp_check(
int j;
xfs_dinode_t *dip;
j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
j = M_IGEO(mp)->inode_cluster_size >> mp->m_sb.sb_inodelog;
for (i = 0; i < j; i++) {
dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);

View File

@@ -804,16 +804,18 @@ const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
*/
void
xfs_sb_mount_common(
struct xfs_mount *mp,
struct xfs_sb *sbp)
struct xfs_mount *mp,
struct xfs_sb *sbp)
{
struct xfs_ino_geometry *igeo = M_IGEO(mp);
mp->m_agfrotor = mp->m_agirotor = 0;
mp->m_maxagi = mp->m_sb.sb_agcount;
mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
mp->m_blockmask = sbp->sb_blocksize - 1;
mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
mp->m_blockwmask = mp->m_blockwsize - 1;
@@ -823,10 +825,10 @@ xfs_sb_mount_common(
mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
@@ -844,14 +846,14 @@ xfs_sb_mount_common(
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
mp->m_ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
sbp->sb_inopblock);
mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
if (sbp->sb_spino_align)
mp->m_ialloc_min_blks = sbp->sb_spino_align;
igeo->ialloc_min_blks = sbp->sb_spino_align;
else
mp->m_ialloc_min_blks = mp->m_ialloc_blks;
igeo->ialloc_min_blks = igeo->ialloc_blks;
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
mp->m_ag_max_usable = xfs_alloc_ag_max_usable(mp);
}
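
As a quick numeric check on the ialloc_inos/ialloc_blks derivation above, using illustrative values only (4096-byte blocks and 512-byte inodes give sb_inopblock = 8 and sb_inopblog = 3; XFS_INODES_PER_CHUNK is 64):

/*
 * ialloc_inos = max(XFS_INODES_PER_CHUNK, sb_inopblock)
 *             = max(64, 8)      = 64 inodes per allocation
 * ialloc_blks = ialloc_inos >> sb_inopblog
 *             = 64 >> 3         = 8 blocks per inode chunk
 */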

View File

@@ -136,9 +136,10 @@ STATIC uint
xfs_calc_inobt_res(
struct xfs_mount *mp)
{
return xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
return xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels,
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
/*
@@ -167,7 +168,7 @@ xfs_calc_finobt_res(
* includes:
*
* the allocation btrees: 2 trees * (max depth - 1) * block size
* the inode chunk: m_ialloc_blks * N
* the inode chunk: m_ino_geo.ialloc_blks * N
*
* The size N of the inode chunk reservation depends on whether it is for
* allocation or free and which type of create transaction is in use. An inode
@@ -193,7 +194,7 @@ xfs_calc_inode_chunk_res(
size = XFS_FSB_TO_B(mp, 1);
}
res += xfs_calc_buf_res(mp->m_ialloc_blks, size);
res += xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
return res;
}
@@ -307,7 +308,8 @@ xfs_calc_iunlink_remove_reservation(
struct xfs_mount *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
2 * max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
2 * max_t(uint, XFS_FSB_TO_B(mp, 1),
M_IGEO(mp)->inode_cluster_size);
}
/*
@@ -345,7 +347,8 @@ STATIC uint
xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
max_t(uint, XFS_FSB_TO_B(mp, 1),
M_IGEO(mp)->inode_cluster_size);
}
/*

View File

@@ -56,9 +56,9 @@
#define XFS_DIRREMOVE_SPACE_RES(mp) \
XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
#define XFS_IALLOC_SPACE_RES(mp) \
((mp)->m_ialloc_blks + \
(M_IGEO(mp)->ialloc_blks + \
(xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \
((mp)->m_in_maxlevels - 1)))
(M_IGEO(mp)->inobt_maxlevels - 1)))
/*
* Space reservation values for various transactions.
@@ -94,7 +94,8 @@
#define XFS_SYMLINK_SPACE_RES(mp,nl,b) \
(XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b))
#define XFS_IFREE_SPACE_RES(mp) \
(xfs_sb_version_hasfinobt(&mp->m_sb) ? (mp)->m_in_maxlevels : 0)
(xfs_sb_version_hasfinobt(&mp->m_sb) ? \
M_IGEO(mp)->inobt_maxlevels : 0)
#endif /* __XFS_TRANS_SPACE_H__ */

View File

@@ -87,14 +87,14 @@ xfs_agino_range(
* Calculate the first inode, which will be in the first
* cluster-aligned block after the AGFL.
*/
bno = round_up(XFS_AGFL_BLOCK(mp) + 1, mp->m_cluster_align);
bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
*first = XFS_AGB_TO_AGINO(mp, bno);
/*
* Calculate the last inode, which will be at the end of the
* last (aligned) cluster that can be allocated in the AG.
*/
bno = round_down(eoag, mp->m_cluster_align);
bno = round_down(eoag, M_IGEO(mp)->cluster_align);
*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}
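
To make the rounding concrete, with assumed values only (the AGFL occupying AG block 3 and a cluster_align of 8 blocks):

/*
 * first usable inode block = round_up(3 + 1, 8)   = AG block 8
 * for an AG of 1003 blocks,
 * end of the usable range  = round_down(1003, 8)  = AG block 1000,
 * so the last valid inode is the final inode of AG block 999.
 */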