Merge tag 'for-5.8-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba:
 "A number of fixes, located in two areas, one performance fix and one
  fixup for better integration with another patchset.

   - bug fixes in nowait aio:
       - fix snapshot creation hang after nowait-aio was used
       - fix failure to write to prealloc extent past EOF
       - don't block when extent range is locked

   - block group fixes:
       - relocation failure when scrub runs in parallel
       - refcount fix when removing fails
       - fix race between removal and creation
       - space accounting fixes

   - reinstate fast path check for log tree at unlink time, fixes a
     performance drop of up to 30% in REAIM

   - kzfree/kfree fixup to ease treewide patchset renaming kzfree"

* tag 'for-5.8-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: use kfree() in btrfs_ioctl_get_subvol_info()
  btrfs: fix RWF_NOWAIT writes blocking on extent locks and waiting for IO
  btrfs: fix RWF_NOWAIT write not failling when we need to cow
  btrfs: fix failure of RWF_NOWAIT write into prealloc extent beyond eof
  btrfs: fix hang on snapshot creation after RWF_NOWAIT write
  btrfs: check if a log root exists before locking the log_mutex on unlink
  btrfs: fix bytes_may_use underflow when running balance and scrub in parallel
  btrfs: fix data block group relocation failure due to concurrent scrub
  btrfs: fix race between block group removal and block group creation
  btrfs: fix a block group ref counter leak after failure to remove block group
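For context only (not part of the commit): the RWF_NOWAIT fixes above concern writes issued with the per-call nowait flag, typically via pwritev2(). The sketch below shows the kind of write these code paths serve; the file path, O_DIRECT usage and buffer size are illustrative assumptions, not taken from the patches.

/*
 * Illustrative userspace RWF_NOWAIT write. Assumes Linux with pwritev2()
 * (glibc >= 2.26) and an existing file on a btrfs mount; path and sizes
 * are made up for the example.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_NOWAIT
#define RWF_NOWAIT 0x00000008	/* fallback if libc headers lack it */
#endif

int main(void)
{
	void *buf = NULL;
	struct iovec iov;
	ssize_t ret;
	/* O_DIRECT needs an aligned buffer, hence posix_memalign() below. */
	int fd = open("/mnt/btrfs/testfile", O_WRONLY | O_DIRECT);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	memset(buf, 0xab, 4096);
	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* A nowait write must either complete without blocking or fail
	 * with EAGAIN; it must never sleep on locks or wait for IO. */
	ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		puts("write would block, retry from a context that may sleep");
	else if (ret < 0)
		perror("pwritev2");
	else
		printf("wrote %zd bytes without blocking\n", ret);

	free(buf);
	close(fd);
	return 0;
}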
@@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode,
 	u64 num_bytes;
 	unsigned long ram_size;
 	u64 cur_alloc_size = 0;
+	u64 min_alloc_size;
 	u64 blocksize = fs_info->sectorsize;
 	struct btrfs_key ins;
 	struct extent_map *em;
@@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
 	btrfs_drop_extent_cache(BTRFS_I(inode), start,
 			start + num_bytes - 1, 0);
 
+	/*
+	 * Relocation relies on the relocated extents to have exactly the same
+	 * size as the original extents. Normally writeback for relocation data
+	 * extents follows a NOCOW path because relocation preallocates the
+	 * extents. However, due to an operation such as scrub turning a block
+	 * group to RO mode, it may fallback to COW mode, so we must make sure
+	 * an extent allocated during COW has exactly the requested size and can
+	 * not be split into smaller extents, otherwise relocation breaks and
+	 * fails during the stage where it updates the bytenr of file extent
+	 * items.
+	 */
+	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+		min_alloc_size = num_bytes;
+	else
+		min_alloc_size = fs_info->sectorsize;
+
 	while (num_bytes > 0) {
 		cur_alloc_size = num_bytes;
 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
-					   fs_info->sectorsize, 0, alloc_hint,
+					   min_alloc_size, 0, alloc_hint,
 					   &ins, 1, 1);
 		if (ret < 0)
 			goto out_unlock;
@@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
 			   int *page_started, unsigned long *nr_written)
 {
 	const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+	const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+				   BTRFS_DATA_RELOC_TREE_OBJECTID);
 	const u64 range_bytes = end + 1 - start;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	u64 range_start = start;
@@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
 	 * data space info, which we incremented in the step above.
 	 *
 	 * If we need to fallback to cow and the inode corresponds to a free
-	 * space cache inode, we must also increment bytes_may_use of the data
-	 * space_info for the same reason. Space caches always get a prealloc
+	 * space cache inode or an inode of the data relocation tree, we must
+	 * also increment bytes_may_use of the data space_info for the same
+	 * reason. Space caches and relocated data extents always get a prealloc
 	 * extent for them, however scrub or balance may have set the block
-	 * group that contains that extent to RO mode.
+	 * group that contains that extent to RO mode and therefore force COW
+	 * when starting writeback.
 	 */
 	count = count_range_bits(io_tree, &range_start, end, range_bytes,
 				 EXTENT_NORESERVE, 0);
-	if (count > 0 || is_space_ino) {
-		const u64 bytes = is_space_ino ? range_bytes : count;
+	if (count > 0 || is_space_ino || is_reloc_ino) {
+		u64 bytes = count;
 		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
 
+		if (is_space_ino || is_reloc_ino)
+			bytes = range_bytes;
+
 		spin_lock(&sinfo->lock);
 		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
 		spin_unlock(&sinfo->lock);
@@ -7865,9 +7889,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 			dio_data.overwrite = 1;
 			inode_unlock(inode);
 			relock = true;
-		} else if (iocb->ki_flags & IOCB_NOWAIT) {
-			ret = -EAGAIN;
-			goto out;
 		}
 		ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
 						   offset, count);
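Taken on its own, the cow_file_range() change above amounts to the following allocation-size rule. This is a standalone paraphrase with simplified types (plain integers instead of btrfs structures), not the kernel code itself.

/* Paraphrase of the new min_alloc_size selection in cow_file_range();
 * simplified types, for illustration only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pick_min_alloc_size(bool is_data_reloc_root,
				    uint64_t num_bytes, uint64_t sectorsize)
{
	/* Relocation requires the COW'ed extent to match the original
	 * extent size exactly, so the allocator must not split it. */
	if (is_data_reloc_root)
		return num_bytes;
	/* Everyone else may accept a smaller extent, down to one sector. */
	return sectorsize;
}

int main(void)
{
	/* Example: a 1 MiB range on a filesystem with 4 KiB sectors. */
	printf("reloc root:  %llu\n",
	       (unsigned long long)pick_min_alloc_size(true, 1 << 20, 4096));
	printf("normal root: %llu\n",
	       (unsigned long long)pick_min_alloc_size(false, 1 << 20, 4096));
	return 0;
}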