Merge branch 'fallocate-insert-range' into for-next

Dave Chinner
2015-03-25 15:12:53 +11:00
9 changed files with 497 additions and 92 deletions


@@ -1376,22 +1376,19 @@ out:
}
/*
* xfs_collapse_file_space()
* This routine frees disk space and shifts extents for the given file.
* The first thing we do is free data blocks in the specified range
* by calling xfs_free_file_space(). It also syncs dirty data
* and invalidates the page cache over the region on which the collapse
* range is working. Extent records are then shifted to the left to
* cover the hole.
* RETURNS:
* 0 on success
* errno on error
*
* @next_fsb will keep track of the extent currently undergoing the shift.
* @stop_fsb will keep track of the extent at which we have to stop.
* If we are shifting left, we will start with block (offset + len) and
* shift each extent up to the last extent.
* If we are shifting right, we will start with the last extent inside the
* file space and continue until we reach the block corresponding to offset.
* A worked example of this block arithmetic follows the function below.
*/
int
xfs_collapse_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
xfs_shift_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len,
enum shift_direction direction)
{
int done = 0;
struct xfs_mount *mp = ip->i_mount;
@@ -1400,21 +1397,26 @@ xfs_collapse_file_space(
struct xfs_bmap_free free_list;
xfs_fsblock_t first_block;
int committed;
xfs_fileoff_t start_fsb;
xfs_fileoff_t stop_fsb;
xfs_fileoff_t next_fsb;
xfs_fileoff_t shift_fsb;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
trace_xfs_collapse_file_space(ip);
if (direction == SHIFT_LEFT) {
next_fsb = XFS_B_TO_FSB(mp, offset + len);
stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
} else {
/*
* If right shift, delegate the work of initializing
* next_fsb to xfs_bmap_shift_extents(), which runs with the ilock held.
*/
next_fsb = NULLFSBLOCK;
stop_fsb = XFS_B_TO_FSB(mp, offset);
}
next_fsb = XFS_B_TO_FSB(mp, offset + len);
shift_fsb = XFS_B_TO_FSB(mp, len);
error = xfs_free_file_space(ip, offset, len);
if (error)
return error;
/*
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
@@ -1427,20 +1429,28 @@ xfs_collapse_file_space(
/*
* Write back and invalidate the cache for the remainder of the file as we're
* about to shift down every extent from the collapse range to EOF. The
* free of the collapse range above might have already done some of
* this, but we shouldn't rely on it to do anything outside of the range
* that was freed.
* about to shift down every extent from offset to EOF.
*/
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
offset + len, -1);
offset, -1);
if (error)
return error;
error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
(offset + len) >> PAGE_CACHE_SHIFT, -1);
offset >> PAGE_CACHE_SHIFT, -1);
if (error)
return error;
/*
* The extent shifting code works on extent granularity. So, if
* stop_fsb is not the starting block of an extent, we need to split
* the extent at stop_fsb.
*/
if (direction == SHIFT_RIGHT) {
error = xfs_bmap_split_extent(ip, stop_fsb);
if (error)
return error;
}
while (!error && !done) {
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
/*
@@ -1464,7 +1474,7 @@ xfs_collapse_file_space(
if (error)
goto out;
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_bmap_init(&free_list, &first_block);
@@ -1472,10 +1482,9 @@ xfs_collapse_file_space(
* We are using the write transaction in which a maximum of 2 bmbt
* updates are allowed
*/
start_fsb = next_fsb;
error = xfs_bmap_shift_extents(tp, ip, start_fsb, shift_fsb,
&done, &next_fsb, &first_block, &free_list,
XFS_BMAP_MAX_SHIFT_EXTENTS);
error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
&done, stop_fsb, &first_block, &free_list,
direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
if (error)
goto out;
@@ -1484,17 +1493,69 @@ xfs_collapse_file_space(
goto out;
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
return error;
out:
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
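
A minimal sketch of the next_fsb/stop_fsb/shift_fsb arithmetic above, assuming a 4096-byte filesystem block size. xfs_b_to_fsb() below is a user-space stand-in for the kernel's XFS_B_TO_FSB() macro (a byte count rounded up to filesystem blocks) and is not part of the patch:

#include <stdio.h>

#define BLKSIZE 4096ULL				/* assumed fs block size */

/* stand-in for XFS_B_TO_FSB(): byte count rounded up to fs blocks */
static unsigned long long xfs_b_to_fsb(unsigned long long bytes)
{
	return (bytes + BLKSIZE - 1) / BLKSIZE;
}

int main(void)
{
	unsigned long long offset = 8192, len = 4096;

	/* both directions shift by the same number of blocks */
	printf("shift_fsb = %llu\n", xfs_b_to_fsb(len));		/* 1 */

	/* SHIFT_LEFT (collapse): start at offset + len, stop at EOF */
	printf("left:  next_fsb = %llu\n", xfs_b_to_fsb(offset + len));	/* 3 */

	/*
	 * SHIFT_RIGHT (insert): next_fsb is picked up from the last
	 * extent, and the shift stops at the block holding offset.
	 */
	printf("right: stop_fsb = %llu\n", xfs_b_to_fsb(offset));	/* 2 */
	return 0;
}
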
/*
* xfs_collapse_file_space()
* This routine frees disk space and shifts extents for the given file.
* The first thing we do is free data blocks in the specified range
* by calling xfs_free_file_space(). It also syncs dirty data
* and invalidates the page cache over the region on which the collapse
* range is working. Extent records are then shifted to the left to
* cover the hole.
* RETURNS:
* 0 on success
* errno on error
*
*/
int
xfs_collapse_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
int error;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
trace_xfs_collapse_file_space(ip);
error = xfs_free_file_space(ip, offset, len);
if (error)
return error;
return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}
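
For context, a collapse is driven from user space through fallocate(2) with FALLOC_FL_COLLAPSE_RANGE. A minimal sketch follows; the offset and length must be multiples of the filesystem block size and the range must end before EOF, and collapse_range() is just an illustrative wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

/* Remove bytes [8192, 12288) from the file and shift the tail left. */
int collapse_range(int fd)
{
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 8192, 4096) < 0) {
		perror("FALLOC_FL_COLLAPSE_RANGE");
		return -1;
	}
	return 0;
}
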
/*
* xfs_insert_file_space()
* This routine creates hole space by shifting extents for the given file.
* The first thing we do is sync dirty data and invalidate the page cache
* over the region on which the insert range is working. We then split the
* extent covering the given offset into two extents by calling
* xfs_bmap_split_extent(), and shift all extent records lying between
* [offset, last allocated extent] to the right to make room for the hole.
* RETURNS:
* 0 on success
* errno on error
*/
int
xfs_insert_file_space(
struct xfs_inode *ip,
loff_t offset,
loff_t len)
{
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
trace_xfs_insert_file_space(ip);
return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}
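
The new insert path is reached the same way with FALLOC_FL_INSERT_RANGE. A minimal sketch under the same alignment rules (the offset must also lie strictly inside the file); insert_range() is again just an illustrative wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

/* Open a 4096-byte hole at offset 8192; existing data there shifts right. */
int insert_range(int fd)
{
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 8192, 4096) < 0) {
		perror("FALLOC_FL_INSERT_RANGE");
		return -1;
	}
	return 0;
}
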
/*
* We need to check that the format of the data fork in the temporary inode is
* valid for the target inode before doing the swap. This is not a problem with