ext4: reimplement ext4_find_delay_alloc_range on extent status tree

Signed-off-by: Yongqiang Yang <xiaoqiangnk@gmail.com>
Signed-off-by: Allison Henderson <achender@linux.vnet.ibm.com>
Signed-off-by: Zheng Liu <wenqing.lz@taobao.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
@@ -483,49 +483,6 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
 	return num;
 }
 
-/*
- * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
- */
-static void set_buffers_da_mapped(struct inode *inode,
-				   struct ext4_map_blocks *map)
-{
-	struct address_space *mapping = inode->i_mapping;
-	struct pagevec pvec;
-	int i, nr_pages;
-	pgoff_t index, end;
-
-	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	end = (map->m_lblk + map->m_len - 1) >>
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-	pagevec_init(&pvec, 0);
-	while (index <= end) {
-		nr_pages = pagevec_lookup(&pvec, mapping, index,
-					  min(end - index + 1,
-					      (pgoff_t)PAGEVEC_SIZE));
-		if (nr_pages == 0)
-			break;
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-			struct buffer_head *bh, *head;
-
-			if (unlikely(page->mapping != mapping) ||
-			    !PageDirty(page))
-				break;
-
-			if (page_has_buffers(page)) {
-				bh = head = page_buffers(page);
-				do {
-					set_buffer_da_mapped(bh);
-					bh = bh->b_this_page;
-				} while (bh != head);
-			}
-			index++;
-		}
-		pagevec_release(&pvec);
-	}
-}
-
 /*
  * The ext4_map_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
@@ -661,13 +618,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
 		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
-		/* If we have successfully mapped the delayed allocated blocks,
-		 * set the BH_Da_Mapped bit on them. Its important to do this
-		 * under the protection of i_data_sem.
-		 */
 		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
 			int ret;
-			set_buffers_da_mapped(inode, map);
 delayed_mapped:
 			/* delayed allocation blocks has been allocated */
 			ret = ext4_es_remove_extent(inode, map->m_lblk,
@@ -1330,7 +1282,6 @@ static void ext4_da_page_release_reservation(struct page *page,
 		if ((offset <= curr_off) && (buffer_delay(bh))) {
 			to_release++;
 			clear_buffer_delay(bh);
-			clear_buffer_da_mapped(bh);
 		}
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
@@ -1347,7 +1298,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
 			((num_clusters - 1) << sbi->s_cluster_bits);
 		if (sbi->s_cluster_ratio == 1 ||
-		    !ext4_find_delalloc_cluster(inode, lblk, 1))
+		    !ext4_find_delalloc_cluster(inode, lblk))
 			ext4_da_release_space(inode, 1);
 
 		num_clusters--;
@@ -1453,8 +1404,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
 				clear_buffer_delay(bh);
 				bh->b_blocknr = pblock;
 			}
-			if (buffer_da_mapped(bh))
-				clear_buffer_da_mapped(bh);
 			if (buffer_unwritten(bh) ||
 			    buffer_mapped(bh))
 				BUG_ON(bh->b_blocknr != pblock);
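
Note on the replacement (not part of the commit text above): these hunks only remove the per-buffer-head BH_Da_Mapped bookkeeping; the lookup that ext4_find_delalloc_cluster() now relies on is a range query against the inode's extent status tree, the part of the change not shown in this view. As a rough user-space sketch of that idea (the struct and function names below are invented for illustration and are not the kernel API), the delayed-allocation check reduces to an interval-overlap test against a cached delayed extent:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for one cached "delayed" extent record. */
struct delayed_extent {
	unsigned int lblk;	/* first logical block covered */
	unsigned int len;	/* number of blocks; 0 means no record cached */
};

/*
 * Return true if any block in [lblk_start, lblk_end] falls inside the
 * cached delayed extent - the interval-overlap test a status-tree based
 * delalloc range lookup boils down to.
 */
static bool range_has_delayed_block(const struct delayed_extent *es,
				    unsigned int lblk_start,
				    unsigned int lblk_end)
{
	if (es->len == 0)
		return false;			/* nothing delayed is cached */
	if (es->lblk > lblk_end)
		return false;			/* record starts after the range */
	if (es->lblk + es->len - 1 < lblk_start)
		return false;			/* record ends before the range */
	return true;
}

int main(void)
{
	struct delayed_extent es = { .lblk = 100, .len = 8 };	/* blocks 100..107 */

	printf("%d\n", range_has_delayed_block(&es, 90, 99));	/* 0: before */
	printf("%d\n", range_has_delayed_block(&es, 104, 200));	/* 1: overlaps */
	printf("%d\n", range_has_delayed_block(&es, 108, 120));	/* 0: after */
	return 0;
}

Keeping this information in one per-inode tree means a single lookup answers "is this range delayed-allocated?", instead of walking every dirty page's buffer heads the way the removed set_buffers_da_mapped() loop had to.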