Merge branch 'for-linus-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull more btrfs updates from Chris Mason:
 "This is part two of my btrfs pull, which is some cleanups and a batch
  of fixes. Most of the code here is from Jeff Mahoney, making the
  pointers we pass around internally more consistent and less confusing
  overall. I noticed a small problem right before I sent this out
  yesterday, so I fixed it up and re-tested overnight"

* 'for-linus-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (40 commits)
  Btrfs: fix __MAX_CSUM_ITEMS
  btrfs: btrfs_abort_transaction, drop root parameter
  btrfs: add btrfs_trans_handle->fs_info pointer
  btrfs: btrfs_relocate_chunk pass extent_root to btrfs_end_transaction
  btrfs: convert nodesize macros to static inlines
  btrfs: introduce BTRFS_MAX_ITEM_SIZE
  btrfs: cleanup, remove prototype for btrfs_find_root_ref
  btrfs: copy_to_sk drop unused root parameter
  btrfs: simpilify btrfs_subvol_inherit_props
  btrfs: tests, use BTRFS_FS_STATE_DUMMY_FS_INFO instead of dummy root
  btrfs: tests, require fs_info for root
  btrfs: tests, move initialization into tests/
  btrfs: btrfs_test_opt and friends should take a btrfs_fs_info
  btrfs: prefix fsid to all trace events
  btrfs: plumb fs_info into btrfs_work
  btrfs: remove obsolete part of comment in statfs
  btrfs: hide test-only member under ifdef
  btrfs: Ratelimit "no csum found" info message
  btrfs: Add ratelimit to btrfs printing
  Btrfs: fix unexpected balance crash due to BUG_ON
  ...
@@ -163,13 +163,13 @@ int __init extent_io_init(void)
 {
 	extent_state_cache = kmem_cache_create("btrfs_extent_state",
 			sizeof(struct extent_state), 0,
-			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+			SLAB_MEM_SPREAD, NULL);
 	if (!extent_state_cache)
 		return -ENOMEM;
 
 	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
 			sizeof(struct extent_buffer), 0,
-			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+			SLAB_MEM_SPREAD, NULL);
 	if (!extent_buffer_cache)
 		goto free_state_cache;
 
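The first hunk only changes the slab flags, but it sits in the middle of the usual create-or-unwind initialization pattern: each cache is created in turn, and a failure after the first one jumps to a label that destroys whatever was already set up. A minimal sketch of that pattern, assuming only the standard kmem_cache_create()/kmem_cache_destroy() API; all names except those two calls are illustrative, not the btrfs ones:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* stand-ins for the real extent_state / extent_buffer structures */
struct demo_state  { int dummy; };
struct demo_buffer { int dummy; };

static struct kmem_cache *demo_state_cache;
static struct kmem_cache *demo_buffer_cache;

static int __init demo_caches_init(void)
{
	demo_state_cache = kmem_cache_create("demo_state",
			sizeof(struct demo_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!demo_state_cache)
		return -ENOMEM;

	demo_buffer_cache = kmem_cache_create("demo_buffer",
			sizeof(struct demo_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!demo_buffer_cache)
		goto free_state_cache;	/* unwind the cache that already exists */

	return 0;

free_state_cache:
	kmem_cache_destroy(demo_state_cache);
	demo_state_cache = NULL;
	return -ENOMEM;
}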
@@ -2756,7 +2756,6 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page,
 	if (tree->ops && tree->ops->merge_bio_hook)
 		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
 						bio_flags);
-	BUG_ON(ret < 0);
 	return ret;
 
 }
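With the BUG_ON() gone, merge_bio simply returns whatever the merge_bio_hook callback returned, and it becomes the caller's job to treat a negative value as an error rather than crashing the machine. A hedged sketch of that division of labour, with hypothetical types and names (not the actual btrfs call chain):

struct demo_ops {
	int (*merge_bio_hook)(void);	/* <0 on error, like the real hook */
};

struct demo_tree {
	const struct demo_ops *ops;
};

/* the helper only reports; it no longer asserts */
static int demo_merge_bio(struct demo_tree *tree)
{
	int ret = 0;

	if (tree->ops && tree->ops->merge_bio_hook)
		ret = tree->ops->merge_bio_hook();
	return ret;
}

static int demo_submit(struct demo_tree *tree)
{
	int ret = demo_merge_bio(tree);

	if (ret < 0)
		return ret;	/* propagate the error instead of BUG_ON() */
	/* ... otherwise go on to build and submit the bio ... */
	return 0;
}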
@@ -2879,6 +2878,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
  * into the tree that are removed when the IO is done (by the end_io
  * handlers)
  * XXX JDM: This needs looking at to ensure proper page locking
+ * return 0 on success, otherwise return error
  */
 static int __do_readpage(struct extent_io_tree *tree,
 			 struct page *page,
@@ -2900,7 +2900,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 	sector_t sector;
 	struct extent_map *em;
 	struct block_device *bdev;
-	int ret;
+	int ret = 0;
 	int nr = 0;
 	size_t pg_offset = 0;
 	size_t iosize;
@@ -3081,6 +3081,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 		} else {
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1);
+			goto out;
 		}
 		cur = cur + iosize;
 		pg_offset += iosize;
@@ -3091,7 +3092,7 @@ out:
 		SetPageUptodate(page);
 		unlock_page(page);
 	}
-	return 0;
+	return ret;
 }
 
 static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
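The three hunks above (plus the comment hunk) are one logical change to __do_readpage: ret now starts at 0, the failure branch records the error and jumps to the out label instead of falling through, and the function returns ret so callers finally see the failure. A minimal sketch of that init / goto out / return ret shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

static int demo_do_read(bool fail)
{
	int ret = 0;		/* previously the function always returned 0 */

	if (fail) {
		ret = -EIO;	/* remember the error ... */
		goto out;	/* ... and stop processing further ranges */
	}

	/* normal processing of the remaining ranges would happen here */

out:
	return ret;		/* 0 on success, negative errno otherwise */
}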
@@ -5230,14 +5231,31 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	atomic_set(&eb->io_pages, num_reads);
 	for (i = start_i; i < num_pages; i++) {
 		page = eb->pages[i];
+
 		if (!PageUptodate(page)) {
+			if (ret) {
+				atomic_dec(&eb->io_pages);
+				unlock_page(page);
+				continue;
+			}
+
 			ClearPageError(page);
 			err = __extent_read_full_page(tree, page,
 						      get_extent, &bio,
 						      mirror_num, &bio_flags,
 						      REQ_META);
-			if (err)
+			if (err) {
 				ret = err;
+				/*
+				 * We use &bio in above __extent_read_full_page,
+				 * so we ensure that if it returns error, the
+				 * current page fails to add itself to bio and
+				 * it's been unlocked.
+				 *
+				 * We must dec io_pages by ourselves.
+				 */
+				atomic_dec(&eb->io_pages);
+			}
 		} else {
 			unlock_page(page);
 		}
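This last hunk is the leak fix itself: io_pages is pre-loaded with the number of reads, so any page that never makes it into a bio, either because an earlier page already failed or because its own submission failed, has to drop the counter itself; otherwise the count never reaches zero and the extent buffer is never released. A rough sketch of that accounting invariant, using a bare atomic_t and hypothetical names in place of eb->io_pages and __extent_read_full_page():

#include <linux/atomic.h>

/*
 * Invariant: "pending" starts at the number of pages we intend to read.
 * Every page is accounted for exactly once: the completion handler drops
 * it when its I/O finishes, or we drop it here when the page was never
 * submitted at all.
 */
static int demo_read_pages(atomic_t *pending, int num_pages,
			   int (*submit)(int page_index))
{
	int i, err, ret = 0;

	atomic_set(pending, num_pages);
	for (i = 0; i < num_pages; i++) {
		if (ret) {
			/* an earlier page failed: skip this one, keep the count honest */
			atomic_dec(pending);
			continue;
		}
		err = submit(i);
		if (err) {
			ret = err;
			/* this page never joined a bio, so no completion will
			 * ever decrement pending for it; do it ourselves */
			atomic_dec(pending);
		}
	}
	return ret;
}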