Merge branch 'iov_iter' into for-next
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
         int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        /* lockdep really cares that we take all of these spinlocks
-         * in the right order. If any of the locks in the path are not
-         * currently blocking, it is going to complain. So, make really
-         * really sure by forcing the path to blocking before we clear
-         * the path blocking.
-         */
         if (held) {
                 btrfs_set_lock_blocking_rw(held, held_rw);
                 if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                         held_rw = BTRFS_READ_LOCK_BLOCKING;
         }
         btrfs_set_path_blocking(p);
-#endif
 
         for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                 if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                 }
         }
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
         if (held)
                 btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
                                 }
                                 p->locks[level] = BTRFS_WRITE_LOCK;
                         } else {
-                                err = btrfs_try_tree_read_lock(b);
+                                err = btrfs_tree_read_lock_atomic(b);
                                 if (!err) {
                                         btrfs_set_path_blocking(p);
                                         btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
                 }
 
                 level = btrfs_header_level(b);
-                err = btrfs_try_tree_read_lock(b);
+                err = btrfs_tree_read_lock_atomic(b);
                 if (!err) {
                         btrfs_set_path_blocking(p);
                         btrfs_tree_read_lock(b);
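
The ctree.c hunks swap btrfs_try_tree_read_lock() for the new
btrfs_tree_read_lock_atomic(): the atomic variant may spin briefly on
eb->lock but never waits for a blocking writer, so the search only drops
to the blocking slow path on real contention. A minimal sketch of that
fallback pattern, assuming a hypothetical helper read_lock_eb() and
omitting the error handling and btrfs_clear_path_blocking() call of the
real btrfs_search_slot():

        /* sketch only: read_lock_eb() is not part of this commit */
        static void read_lock_eb(struct btrfs_path *p, int level,
                                 struct extent_buffer *b)
        {
                if (!btrfs_tree_read_lock_atomic(b)) {
                        btrfs_set_path_blocking(p);     /* we may sleep now */
                        btrfs_tree_read_lock(b);        /* waits for writers */
                }
                p->locks[level] = BTRFS_READ_LOCK;
        }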
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
@@ -3276,7 +3276,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
                                  unsigned long count, int wait);
-int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 offset, int metadata, u64 *refs, u64 *flags);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
@@ -3817,19 +3817,19 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
         struct btrfs_super_block *sb = fs_info->super_copy;
         int ret = 0;
 
-        if (sb->root_level > BTRFS_MAX_LEVEL) {
-                printk(KERN_ERR "BTRFS: tree_root level too big: %d > %d\n",
-                                sb->root_level, BTRFS_MAX_LEVEL);
+        if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
+                printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
+                                btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
                 ret = -EINVAL;
         }
-        if (sb->chunk_root_level > BTRFS_MAX_LEVEL) {
-                printk(KERN_ERR "BTRFS: chunk_root level too big: %d > %d\n",
-                                sb->chunk_root_level, BTRFS_MAX_LEVEL);
+        if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
+                printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
+                                btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
                 ret = -EINVAL;
         }
-        if (sb->log_root_level > BTRFS_MAX_LEVEL) {
-                printk(KERN_ERR "BTRFS: log_root level too big: %d > %d\n",
-                                sb->log_root_level, BTRFS_MAX_LEVEL);
+        if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
+                printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
+                                btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
                 ret = -EINVAL;
         }
 
@@ -3837,15 +3837,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
          * The common minimum, we don't know if we can trust the nodesize/sectorsize
          * items yet, they'll be verified later. Issue just a warning.
          */
-        if (!IS_ALIGNED(sb->root, 4096))
+        if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
                 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-                                sb->root);
-        if (!IS_ALIGNED(sb->chunk_root, 4096))
+                                btrfs_super_root(sb));
+        if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
                 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-                                sb->chunk_root);
-        if (!IS_ALIGNED(sb->log_root, 4096))
+                                btrfs_super_chunk_root(sb));
+        if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
                 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-                                sb->log_root);
+                                btrfs_super_log_root(sb));
 
         if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
                 printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
@@ -3857,13 +3857,13 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
          * Hint to catch really bogus numbers, bitflips or so, more exact checks are
          * done later
          */
-        if (sb->num_devices > (1UL << 31))
+        if (btrfs_super_num_devices(sb) > (1UL << 31))
                 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
-                                sb->num_devices);
+                                btrfs_super_num_devices(sb));
 
-        if (sb->bytenr != BTRFS_SUPER_INFO_OFFSET) {
+        if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
                 printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
-                                sb->bytenr, BTRFS_SUPER_INFO_OFFSET);
+                                btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
                 ret = -EINVAL;
         }
 
@@ -3871,14 +3871,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
          * The generation is a global counter, we'll trust it more than the others
          * but it's still possible that it's the one that's wrong.
          */
-        if (sb->generation < sb->chunk_root_generation)
+        if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
                 printk(KERN_WARNING
                         "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
-                        sb->generation, sb->chunk_root_generation);
-        if (sb->generation < sb->cache_generation && sb->cache_generation != (u64)-1)
+                        btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
+        if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
+            && btrfs_super_cache_generation(sb) != (u64)-1)
                 printk(KERN_WARNING
                         "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
-                        sb->generation, sb->cache_generation);
+                        btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
 
         return ret;
 }
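
Superblock fields are stored little-endian on disk (__le64 and friends),
so reading sb->root_level and similar members directly gives wrong
results on big-endian machines; the btrfs_super_*() accessors convert to
CPU byte order first. The switch from > to >= also matters:
BTRFS_MAX_LEVEL is the size of the per-level arrays, so a level equal to
it would already index out of bounds. Roughly what one generated
accessor expands to (simplified from the BTRFS_SETGET_STACK_FUNCS macro
output, not the literal kernel code):

        static inline u64 btrfs_super_root(struct btrfs_super_block *s)
        {
                /* on-disk value is __le64; convert before comparing */
                return le64_to_cpu(s->root);
        }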
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
@@ -710,8 +710,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
         rcu_read_unlock();
 }
 
-/* simple helper to search for an existing extent at a given offset */
-int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
+/* simple helper to search for an existing data extent at a given offset */
+int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
 {
         int ret;
         struct btrfs_key key;
@@ -726,12 +726,6 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
         key.type = BTRFS_EXTENT_ITEM_KEY;
         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                 0, 0);
-        if (ret > 0) {
-                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-                if (key.objectid == start &&
-                    key.type == BTRFS_METADATA_ITEM_KEY)
-                        ret = 0;
-        }
         btrfs_free_path(path);
         return ret;
 }
@@ -786,7 +780,6 @@ search_again:
         else
                 key.type = BTRFS_EXTENT_ITEM_KEY;
 
-again:
         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                 &key, path, 0, 0);
         if (ret < 0)
@@ -802,13 +795,6 @@ again:
                             key.offset == root->nodesize)
                                 ret = 0;
                 }
-                if (ret) {
-                        key.objectid = bytenr;
-                        key.type = BTRFS_EXTENT_ITEM_KEY;
-                        key.offset = root->nodesize;
-                        btrfs_release_path(path);
-                        goto again;
-                }
         }
 
         if (ret == 0) {
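
btrfs_lookup_data_extent() can drop the old BTRFS_METADATA_ITEM_KEY
fallback because data and metadata extents are keyed differently in the
extent tree, so a helper that only answers "does this data extent
exist" never needs the retry. Illustrative key layouts (assuming the
skinny-metadata feature for the metadata case; not part of this diff):

        struct btrfs_key data_key = {
                .objectid = start,                  /* logical byte address */
                .type = BTRFS_EXTENT_ITEM_KEY,
                .offset = len,                      /* extent length in bytes */
        };
        struct btrfs_key meta_key = {
                .objectid = start,
                .type = BTRFS_METADATA_ITEM_KEY,
                .offset = 1,                        /* tree level, not a length */
        };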
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
@@ -413,7 +413,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
         ret = 0;
 fail:
         while (ret < 0 && !list_empty(&tmplist)) {
-                sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
+                sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                 list_del(&sums->list);
                 kfree(sums);
         }
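
The one-line file-item.c fix is easy to misread: list_entry() is just
container_of(), so passing &tmplist converts the on-stack list head
itself into a supposed btrfs_ordered_sum, and the list_del()/kfree()
that follow then stomp on stack memory. The first real element hangs
off tmplist.next. Side by side (illustrative, mirrors the hunk above):

        /* WRONG: computes a bogus struct address from the list head */
        sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);

        /* RIGHT: first real entry linked after the head */
        sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);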
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
@@ -127,6 +127,26 @@ again:
         atomic_inc(&eb->spinning_readers);
 }
 
+/*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+        if (atomic_read(&eb->blocking_writers))
+                return 0;
+
+        read_lock(&eb->lock);
+        if (atomic_read(&eb->blocking_writers)) {
+                read_unlock(&eb->lock);
+                return 0;
+        }
+        atomic_inc(&eb->read_locks);
+        atomic_inc(&eb->spinning_readers);
+        return 1;
+}
+
 /*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
@@ -158,7 +178,9 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
             atomic_read(&eb->blocking_readers))
                 return 0;
 
-        write_lock(&eb->lock);
+        if (!write_trylock(&eb->lock))
+                return 0;
+
         if (atomic_read(&eb->blocking_writers) ||
             atomic_read(&eb->blocking_readers)) {
                 write_unlock(&eb->lock);
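
Two details in the locking.c hunks carry the fix. First,
btrfs_tree_read_lock_atomic() checks blocking_writers twice: once
without the lock as a cheap early-out, and again under read_lock(),
because a writer can go blocking between the two checks. Second,
btrfs_try_tree_write_lock() now uses write_trylock(), so a "try" helper
no longer spins behind a held lock; together these are what allow the
CONFIG_DEBUG_LOCK_ALLOC force-to-blocking workaround in
btrfs_clear_path_blocking() to go away. A hypothetical caller sketch
(not in the commit) that reads an extent buffer without ever sleeping:

        if (btrfs_tree_read_lock_atomic(eb)) {
                /* inspect eb under the spinning read lock */
                btrfs_tree_read_unlock(eb);
        } else {
                /* a writer holds or wants eb: retry or take the slow path */
        }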
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
@@ -2151,6 +2151,7 @@ static void __exit exit_btrfs_fs(void)
         extent_map_exit();
         extent_io_exit();
         btrfs_interface_exit();
+        btrfs_end_io_wq_exit();
         unregister_filesystem(&btrfs_fs_type);
         btrfs_exit_sysfs();
         btrfs_cleanup_fs_uuids();
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
@@ -672,7 +672,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                  * is this extent already allocated in the extent
                  * allocation tree? If so, just add a reference
                  */
-                ret = btrfs_lookup_extent(root, ins.objectid,
+                ret = btrfs_lookup_data_extent(root, ins.objectid,
                                           ins.offset);
                 if (ret == 0) {
                         ret = btrfs_inc_extent_ref(trans, root,