Btrfs: don't flush all delalloc inodes when we don't get the s_umount lock
We needn't flush all delalloc inodes when we don't get the s_umount lock, or we would make the tasks wait for a long time.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
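For orientation before the diff: the patch adds an nr argument so that callers which do not hold s_umount can bound how many delalloc inodes they kick off, while nr == -1 keeps the old flush-everything behaviour. Below is a minimal caller sketch, assuming only the post-patch signatures shown in the diff; the function name and the batch size of 64 are made up for illustration and are not part of this commit.

/*
 * Illustrative only -- not part of this commit.  Assumes the post-patch
 * prototype: int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info,
 *                                           int delay_iput, int nr);
 */
static int sketch_flush_some_delalloc(struct btrfs_fs_info *fs_info)
{
        int ret;

        /*
         * Old behaviour, still available: nr == -1 flushes every delalloc
         * inode of every root and can stall the caller for a long time.
         */
        ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
        if (ret < 0)
                return ret;

        /*
         * New option for paths that do not hold s_umount: start writeback
         * for at most 64 inodes (an arbitrary batch size for this sketch).
         */
        return btrfs_start_delalloc_roots(fs_info, 0, 64);
}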
@@ -8437,7 +8437,8 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
+static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
+                                   int nr)
 {
         struct btrfs_inode *binode;
         struct inode *inode;
@@ -8471,23 +8472,19 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
                         else
                                 iput(inode);
                         ret = -ENOMEM;
-                        goto out;
+                        break;
                 }
                 list_add_tail(&work->list, &works);
                 btrfs_queue_work(root->fs_info->flush_workers,
                                  &work->work);
-
+                ret++;
+                if (nr != -1 && ret >= nr)
+                        break;
                 cond_resched();
                 spin_lock(&root->delalloc_lock);
         }
         spin_unlock(&root->delalloc_lock);
 
-        list_for_each_entry_safe(work, next, &works, list) {
-                list_del_init(&work->list);
-                btrfs_wait_and_free_delalloc_work(work);
-        }
-        return 0;
-out:
         list_for_each_entry_safe(work, next, &works, list) {
                 list_del_init(&work->list);
                 btrfs_wait_and_free_delalloc_work(work);
@@ -8508,7 +8505,9 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                 return -EROFS;
 
-        ret = __start_delalloc_inodes(root, delay_iput);
+        ret = __start_delalloc_inodes(root, delay_iput, -1);
+        if (ret > 0)
+                ret = 0;
         /*
          * the filemap_flush will queue IO into the worker threads, but
          * we have to make sure the IO is actually started and that
@@ -8525,7 +8524,8 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
         return ret;
 }
 
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
+                               int nr)
 {
         struct btrfs_root *root;
         struct list_head splice;
@@ -8538,7 +8538,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
 
         spin_lock(&fs_info->delalloc_root_lock);
         list_splice_init(&fs_info->delalloc_roots, &splice);
-        while (!list_empty(&splice)) {
+        while (!list_empty(&splice) && nr) {
                 root = list_first_entry(&splice, struct btrfs_root,
                                         delalloc_root);
                 root = btrfs_grab_fs_root(root);
@@ -8547,15 +8547,20 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
                                &fs_info->delalloc_roots);
                 spin_unlock(&fs_info->delalloc_root_lock);
 
-                ret = __start_delalloc_inodes(root, delay_iput);
+                ret = __start_delalloc_inodes(root, delay_iput, nr);
                 btrfs_put_fs_root(root);
-                if (ret)
+                if (ret < 0)
                         goto out;
 
+                if (nr != -1) {
+                        nr -= ret;
+                        WARN_ON(nr < 0);
+                }
                 spin_lock(&fs_info->delalloc_root_lock);
         }
         spin_unlock(&fs_info->delalloc_root_lock);
 
+        ret = 0;
         atomic_inc(&fs_info->async_submit_draining);
         while (atomic_read(&fs_info->nr_async_submits) ||
                atomic_read(&fs_info->async_delalloc_pages)) {
@@ -8564,7 +8569,6 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
                        atomic_read(&fs_info->async_delalloc_pages) == 0));
         }
         atomic_dec(&fs_info->async_submit_draining);
-        return 0;
 out:
         if (!list_empty_careful(&splice)) {
                 spin_lock(&fs_info->delalloc_root_lock);
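Read in isolation, the accounting the diff introduces is simple: __start_delalloc_inodes() now returns how many inodes it queued for writeback (stopping once it reaches nr), and btrfs_start_delalloc_roots() subtracts that count from its remaining budget before moving to the next root, with nr == -1 meaning "no limit". A standalone sketch of the same pattern follows, using hypothetical names and plain integers in place of the btrfs inode lists; it is not kernel code.

#include <stdio.h>

/* Stand-in for __start_delalloc_inodes(): start writeback for up to "nr"
 * of this root's pending inodes and return how many were started;
 * nr == -1 means no limit, mirroring the patch. */
static int flush_one_root(int pending, int nr)
{
        int started = 0;

        while (pending-- > 0) {
                started++;
                if (nr != -1 && started >= nr)
                        break;
        }
        return started;
}

/* Stand-in for btrfs_start_delalloc_roots(): walk the roots and spread the
 * budget "nr" across them, stopping once it is used up. */
static int flush_roots(const int *pending, int nroots, int nr)
{
        int i, ret, total = 0;

        for (i = 0; i < nroots && nr; i++) {
                ret = flush_one_root(pending[i], nr);
                total += ret;
                if (nr != -1)
                        nr -= ret;      /* shrink the remaining budget */
        }
        return total;
}

int main(void)
{
        int pending[] = { 10, 5, 7 };

        printf("limited:   started %d\n", flush_roots(pending, 3, 8));
        printf("unlimited: started %d\n", flush_roots(pending, 3, -1));
        return 0;
}

Compiled on its own, the limited call starts 8 units of writeback and stops, while the -1 call walks all three roots, which is exactly the choice the patch gives callers that cannot take s_umount.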