Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
 "This is a pretty big pull, and most of these changes have been
  floating in btrfs-next for a long time.  Filipe's properties work is
  a cool building block for inheriting attributes like compression
  down on a per inode basis.

  Jeff Mahoney kicked in code to export filesystem info into sysfs.

  Otherwise, lots of performance improvements, cleanups and bug fixes.

  Looks like there are still a few other small pending incrementals,
  but I wanted to get the bulk of this in first"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (149 commits)
  Btrfs: fix spin_unlock in check_ref_cleanup
  Btrfs: setup inode location during btrfs_init_inode_locked
  Btrfs: don't use ram_bytes for uncompressed inline items
  Btrfs: fix btrfs_search_slot_for_read backwards iteration
  Btrfs: do not export ulist functions
  Btrfs: rework ulist with list+rb_tree
  Btrfs: fix memory leaks on walking backrefs failure
  Btrfs: fix send file hole detection leading to data corruption
  Btrfs: add a reschedule point in btrfs_find_all_roots()
  Btrfs: make send's file extent item search more efficient
  Btrfs: fix to catch all errors when resolving indirect ref
  Btrfs: fix protection between walking backrefs and root deletion
  btrfs: fix warning while merging two adjacent extents
  Btrfs: fix infinite path build loops in incremental send
  btrfs: undo sysfs when open_ctree() fails
  Btrfs: fix snprintf usage by send's gen_unique_name
  btrfs: fix defrag 32-bit integer overflow
  btrfs: sysfs: list the NO_HOLES feature
  btrfs: sysfs: don't show reserved incompat feature
  btrfs: call permission checks earlier in ioctls and return EPERM
  ...
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -59,7 +59,7 @@ void btrfs_leak_debug_check(void)
 
 	while (!list_empty(&states)) {
 		state = list_entry(states.next, struct extent_state, leak_list);
-		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
 		       "state %lu in tree %p refs %d\n",
 		       state->start, state->end, state->state, state->tree,
 		       atomic_read(&state->refs));
@@ -69,7 +69,7 @@ void btrfs_leak_debug_check(void)
 
 	while (!list_empty(&buffers)) {
 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
 		       "refs %d\n",
 		       eb->start, eb->len, atomic_read(&eb->refs));
 		list_del(&eb->leak_list);
@@ -77,16 +77,22 @@ void btrfs_leak_debug_check(void)
 	}
 }
 
-#define btrfs_debug_check_extent_io_range(inode, start, end)		\
-	__btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
+#define btrfs_debug_check_extent_io_range(tree, start, end)		\
+	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
-		struct inode *inode, u64 start, u64 end)
+		struct extent_io_tree *tree, u64 start, u64 end)
 {
-	u64 isize = i_size_read(inode);
+	struct inode *inode;
+	u64 isize;
 
+	if (!tree->mapping)
+		return;
+
+	inode = tree->mapping->host;
+	isize = i_size_read(inode);
 	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
 		printk_ratelimited(KERN_DEBUG
-		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+		    "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
 				caller, btrfs_ino(inode), isize, start, end);
 	}
 }
@@ -124,6 +130,8 @@ static noinline void flush_write_bio(void *data);
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
 {
+	if (!tree->mapping)
+		return NULL;
 	return btrfs_sb(tree->mapping->host->i_sb);
 }
 
@@ -186,11 +194,9 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
-	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
 }
 
@@ -224,12 +230,20 @@ void free_extent_state(struct extent_state *state)
 }
 
 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
-				   struct rb_node *node)
+				   struct rb_node *node,
+				   struct rb_node ***p_in,
+				   struct rb_node **parent_in)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
 	struct tree_entry *entry;
 
+	if (p_in && parent_in) {
+		p = *p_in;
+		parent = *parent_in;
+		goto do_insert;
+	}
+
 	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct tree_entry, rb_node);
@@ -242,35 +256,43 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
 		return parent;
 	}
 
+do_insert:
 	rb_link_node(node, parent, p);
 	rb_insert_color(node, root);
 	return NULL;
 }
 
 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
-				      struct rb_node **prev_ret,
-				      struct rb_node **next_ret)
+				      struct rb_node **prev_ret,
+				      struct rb_node **next_ret,
+				      struct rb_node ***p_ret,
+				      struct rb_node **parent_ret)
 {
 	struct rb_root *root = &tree->state;
-	struct rb_node *n = root->rb_node;
+	struct rb_node **n = &root->rb_node;
 	struct rb_node *prev = NULL;
 	struct rb_node *orig_prev = NULL;
 	struct tree_entry *entry;
 	struct tree_entry *prev_entry = NULL;
 
-	while (n) {
-		entry = rb_entry(n, struct tree_entry, rb_node);
-		prev = n;
+	while (*n) {
+		prev = *n;
+		entry = rb_entry(prev, struct tree_entry, rb_node);
 		prev_entry = entry;
 
 		if (offset < entry->start)
-			n = n->rb_left;
+			n = &(*n)->rb_left;
 		else if (offset > entry->end)
-			n = n->rb_right;
+			n = &(*n)->rb_right;
 		else
-			return n;
+			return *n;
 	}
 
+	if (p_ret)
+		*p_ret = n;
+	if (parent_ret)
+		*parent_ret = prev;
+
 	if (prev_ret) {
 		orig_prev = prev;
 		while (prev && offset > prev_entry->end) {
@@ -292,18 +314,27 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 	return NULL;
 }
 
-static inline struct rb_node *tree_search(struct extent_io_tree *tree,
-					  u64 offset)
+static inline struct rb_node *
+tree_search_for_insert(struct extent_io_tree *tree,
+		       u64 offset,
+		       struct rb_node ***p_ret,
+		       struct rb_node **parent_ret)
 {
 	struct rb_node *prev = NULL;
 	struct rb_node *ret;
 
-	ret = __etree_search(tree, offset, &prev, NULL);
+	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 	if (!ret)
 		return prev;
 	return ret;
 }
 
+static inline struct rb_node *tree_search(struct extent_io_tree *tree,
+					  u64 offset)
+{
+	return tree_search_for_insert(tree, offset, NULL, NULL);
+}
+
 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
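The __etree_search()/tree_search_for_insert() hunks above all serve one
optimization: the lookup walk now records the child link (p_ret) and parent
(parent_ret) it fell off of, so a following insert can link the new node
there directly instead of repeating the O(log n) descent. A minimal
userspace sketch of that pattern, for illustration only (plain binary
search tree instead of the kernel rbtree; every name below is made up,
none of this is part of the patch):

#include <stdio.h>

struct node {
	unsigned long long start, end;	/* inclusive [start, end] key */
	struct node *left, *right;
};

/*
 * Walk down looking for 'offset'.  On a miss, remember the exact child
 * slot the walk ended on so the caller can insert there directly.
 * (The kernel version also hands back the parent node, which rbtree
 * rebalancing needs; a plain BST only needs the link slot.)
 */
static struct node *search_for_insert(struct node **root,
				      unsigned long long offset,
				      struct node ***link_ret)
{
	struct node **n = root;

	while (*n) {
		if (offset < (*n)->start)
			n = &(*n)->left;
		else if (offset > (*n)->end)
			n = &(*n)->right;
		else
			return *n;		/* hit */
	}
	if (link_ret)
		*link_ret = n;			/* cached insertion point */
	return NULL;
}

static void insert_at(struct node **link, struct node *new)
{
	new->left = new->right = NULL;
	*link = new;				/* no second tree walk */
}

int main(void)
{
	struct node *root = NULL, *hit;
	struct node **link;
	struct node a = { 0, 9, NULL, NULL }, b = { 20, 29, NULL, NULL };

	if (!search_for_insert(&root, a.start, &link))
		insert_at(link, &a);
	if (!search_for_insert(&root, b.start, &link))
		insert_at(link, &b);

	hit = search_for_insert(&root, 25, NULL);
	if (hit)
		printf("found [%llu,%llu]\n", hit->start, hit->end);
	return 0;
}

The insert_state() and split_state() hunks below are the consumers: callers
that just did the search pass the cached slot through, callers that did not
pass NULL, NULL and fall back to the full walk.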
@@ -385,23 +416,25 @@ static void set_state_bits(struct extent_io_tree *tree,
  */
 static int insert_state(struct extent_io_tree *tree,
 			struct extent_state *state, u64 start, u64 end,
+			struct rb_node ***p,
+			struct rb_node **parent,
 			unsigned long *bits)
 {
 	struct rb_node *node;
 
 	if (end < start)
-		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
+		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 		       end, start);
 	state->start = start;
 	state->end = end;
 
 	set_state_bits(tree, state, bits);
 
-	node = tree_insert(&tree->state, end, &state->rb_node);
+	node = tree_insert(&tree->state, end, &state->rb_node, p, parent);
 	if (node) {
 		struct extent_state *found;
 		found = rb_entry(node, struct extent_state, rb_node);
-		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
 		       "%llu %llu\n",
 		       found->start, found->end, start, end);
 		return -EEXIST;
@@ -444,7 +477,8 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 	prealloc->state = orig->state;
 	orig->start = split;
 
-	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
+	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node,
+			   NULL, NULL);
 	if (node) {
 		free_extent_state(prealloc);
 		return -EEXIST;
@@ -542,7 +576,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int err;
 	int clear = 0;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	if (bits & EXTENT_DELALLOC)
 		bits |= EXTENT_NORESERVE;
@@ -702,7 +736,7 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct rb_node *node;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	spin_lock(&tree->lock);
 again:
@@ -783,11 +817,13 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	struct rb_node **p;
+	struct rb_node *parent;
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
@@ -809,14 +845,16 @@ again:
 	 * this search will find all the extents that end after
 	 * our range starts.
 	 */
-	node = tree_search(tree, start);
+	node = tree_search_for_insert(tree, start, &p, &parent);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
-		err = insert_state(tree, prealloc, start, end, &bits);
+		err = insert_state(tree, prealloc, start, end,
+				   &p, &parent, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 
+		cache_state(prealloc, cached_state);
 		prealloc = NULL;
 		goto out;
 	}
@@ -919,7 +957,7 @@ hit_next:
 		 * the later extent.
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
-				   &bits);
+				   NULL, NULL, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 
@@ -1005,11 +1043,13 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	struct rb_node **p;
+	struct rb_node *parent;
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
@@ -1032,17 +1072,19 @@ again:
 	 * this search will find all the extents that end after
 	 * our range starts.
 	 */
-	node = tree_search(tree, start);
+	node = tree_search_for_insert(tree, start, &p, &parent);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
 		if (!prealloc) {
 			err = -ENOMEM;
 			goto out;
 		}
-		err = insert_state(tree, prealloc, start, end, &bits);
-		prealloc = NULL;
+		err = insert_state(tree, prealloc, start, end,
+				   &p, &parent, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
+		cache_state(prealloc, cached_state);
+		prealloc = NULL;
 		goto out;
 	}
 	state = rb_entry(node, struct extent_state, rb_node);
@@ -1135,7 +1177,7 @@ hit_next:
 		 * the later extent.
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
-				   &bits);
+				   NULL, NULL, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 		cache_state(prealloc, cached_state);
@@ -2012,9 +2054,10 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 		return -EIO;
 	}
 
-	printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
-		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
-		      start, rcu_str_deref(dev->name), sector);
+	printk_ratelimited_in_rcu(KERN_INFO
+			"BTRFS: read error corrected: ino %lu off %llu "
+			"(dev %s sector %llu)\n", page->mapping->host->i_ino,
+			start, rcu_str_deref(dev->name), sector);
 
 	bio_put(bio);
 	return 0;
@@ -2156,7 +2199,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 		return -EIO;
 	}
 
-	if (em->start > start || em->start + em->len < start) {
+	if (em->start > start || em->start + em->len <= start) {
 		free_extent_map(em);
 		em = NULL;
 	}
@@ -2333,25 +2376,29 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
 	struct bio_vec *bvec;
-	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
 	int i;
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page.  */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
-			printk("%s page write in btrfs with offset %u and length %u\n",
-			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
-			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
-			       bvec->bv_offset, bvec->bv_len);
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "partial page write in btrfs with offset %u and length %u",
+					bvec->bv_offset, bvec->bv_len);
+			else
+				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "incomplete page write in btrfs with offset %u and "
+				   "length %u",
+					bvec->bv_offset, bvec->bv_len);
+		}
 
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
@@ -2421,11 +2468,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page.  */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
-			printk("%s page read in btrfs with offset %u and length %u\n",
-			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
-			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
-			       bvec->bv_offset, bvec->bv_len);
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+					"partial page read in btrfs with offset %u and length %u",
+					bvec->bv_offset, bvec->bv_len);
+			else
+				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+					"incomplete page read in btrfs with offset %u and "
+					"length %u",
+					bvec->bv_offset, bvec->bv_len);
+		}
 
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
@@ -3281,8 +3334,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 		set_range_writeback(tree, cur, cur + iosize - 1);
 		if (!PageWriteback(page)) {
-			printk(KERN_ERR "btrfs warning page %lu not "
-			       "writeback, cur %llu end %llu\n",
+			btrfs_err(BTRFS_I(inode)->root->fs_info,
+				   "page %lu not writeback, cur %llu end %llu",
 			       page->index, cur, end);
 		}
 
@@ -3438,6 +3491,7 @@ static int write_one_eb(struct extent_buffer *eb,
 			struct extent_page_data *epd)
 {
 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	u64 offset = eb->start;
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
@@ -3455,7 +3509,7 @@ static int write_one_eb(struct extent_buffer *eb,
 
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+		ret = submit_extent_page(rw, tree, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags);
@@ -4073,12 +4127,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
-	struct btrfs_file_extent_item *item;
 	int end = 0;
 	u64 em_start = 0;
 	u64 em_len = 0;
 	u64 em_end = 0;
-	unsigned long emflags;
 
 	if (len == 0)
 		return -EINVAL;
@@ -4103,8 +4155,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 	WARN_ON(!ret);
 	path->slots[0]--;
-	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
-			      struct btrfs_file_extent_item);
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = btrfs_key_type(&found_key);
 
@@ -4172,7 +4222,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		offset_in_extent = em_start - em->start;
 		em_end = extent_map_end(em);
 		em_len = em_end - em_start;
-		emflags = em->flags;
 		disko = 0;
 		flags = 0;
 
@@ -4324,10 +4373,9 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 	__free_extent_buffer(eb);
 }
 
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
-						   u64 start,
-						   unsigned long len,
-						   gfp_t mask)
+static struct extent_buffer *
+__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
+		      unsigned long len, gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
 
@@ -4336,7 +4384,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	eb->tree = tree;
+	eb->fs_info = fs_info;
 	eb->bflags = 0;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
@@ -4468,13 +4516,14 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
 	}
 }
 
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start)
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+					 u64 start)
 {
 	struct extent_buffer *eb;
 
 	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	eb = radix_tree_lookup(&fs_info->buffer_radix,
+			       start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		mark_extent_buffer_accessed(eb);
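find_extent_buffer() above shows the lockless-lookup idiom the radix-tree
move relies on: under rcu_read_lock() a buffer found in the tree may
already be on its way to being freed, so the reader may only take a
reference with atomic_inc_not_zero() and must treat a zero count as "not
found". A rough userspace analog of that increment-unless-zero guard using
C11 atomics (the struct and function names below are invented for
illustration, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refs;
};

/* Take a reference only if the object is still live (refs > 0). */
static bool get_ref_unless_zero(struct object *obj)
{
	int old = atomic_load(&obj->refs);

	while (old != 0) {
		/* Move old -> old + 1 atomically; never revive from 0. */
		if (atomic_compare_exchange_weak(&obj->refs, &old, old + 1))
			return true;
		/* The failed CAS reloaded 'old'; loop and re-check for 0. */
	}
	return false;
}

int main(void)
{
	struct object live = { 1 };	/* last reference not yet dropped */
	struct object dead = { 0 };	/* already being torn down */

	printf("live: %d\n", get_ref_unless_zero(&live));	/* prints 1 */
	printf("dead: %d\n", get_ref_unless_zero(&dead));	/* prints 0 */
	return 0;
}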
@@ -4485,7 +4534,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 	return NULL;
 }
 
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, unsigned long len)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
@@ -4494,16 +4543,15 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
-	struct address_space *mapping = tree->mapping;
+	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	int uptodate = 1;
 	int ret;
 
-
-	eb = find_extent_buffer(tree, start);
+	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
 
-	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
+	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
 
@@ -4558,12 +4606,13 @@ again:
 	if (ret)
 		goto free_eb;
 
-	spin_lock(&tree->buffer_lock);
-	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
-	spin_unlock(&tree->buffer_lock);
+	spin_lock(&fs_info->buffer_lock);
+	ret = radix_tree_insert(&fs_info->buffer_radix,
+				start >> PAGE_CACHE_SHIFT, eb);
+	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
-		exists = find_extent_buffer(tree, start);
+		exists = find_extent_buffer(fs_info, start);
 		if (exists)
 			goto free_eb;
 		else
@@ -4571,6 +4620,7 @@ again:
 	}
 	/* add one reference for the tree */
 	check_buffer_tree_ref(eb);
+	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
 	/*
 	 * there is a race where release page may have
@@ -4614,17 +4664,17 @@ static int release_extent_buffer(struct extent_buffer *eb)
 {
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
-		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
-			spin_unlock(&eb->refs_lock);
-		} else {
-			struct extent_io_tree *tree = eb->tree;
+		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
+			struct btrfs_fs_info *fs_info = eb->fs_info;
 
 			spin_unlock(&eb->refs_lock);
 
-			spin_lock(&tree->buffer_lock);
-			radix_tree_delete(&tree->buffer,
+			spin_lock(&fs_info->buffer_lock);
+			radix_tree_delete(&fs_info->buffer_radix,
 					  eb->start >> PAGE_CACHE_SHIFT);
-			spin_unlock(&tree->buffer_lock);
+			spin_unlock(&fs_info->buffer_lock);
+		} else {
+			spin_unlock(&eb->refs_lock);
 		}
 
 		/* Should be safe to release our pages at this point */
|
||||
unsigned long src_i;
|
||||
|
||||
if (src_offset + len > dst->len) {
|
||||
printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
|
||||
printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
|
||||
"len %lu dst len %lu\n", src_offset, len, dst->len);
|
||||
BUG_ON(1);
|
||||
}
|
||||
if (dst_offset + len > dst->len) {
|
||||
printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
|
||||
printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
|
||||
"len %lu dst len %lu\n", dst_offset, len, dst->len);
|
||||
BUG_ON(1);
|
||||
}
|
||||
@@ -5150,12 +5200,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
|
||||
unsigned long src_i;
|
||||
|
||||
if (src_offset + len > dst->len) {
|
||||
printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
|
||||
printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
|
||||
"len %lu len %lu\n", src_offset, len, dst->len);
|
||||
BUG_ON(1);
|
||||
}
|
||||
if (dst_offset + len > dst->len) {
|
||||
printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
|
||||
printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
|
||||
"len %lu len %lu\n", dst_offset, len, dst->len);
|
||||
BUG_ON(1);
|
||||
}
|
||||
|