btrfs: convert from readpages to readahead

Implement the new readahead method in btrfs using the new
readahead_page_batch() function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Link: http://lkml.kernel.org/r/20200414150233.24495-18-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Matthew Wilcox (Oracle), 2020-06-01 21:47:05 -07:00
Committed: Linus Torvalds
Commit:    ba206a026f (parent d4388340ae)
3 changed files with 20 additions and 42 deletions

@@ -4367,51 +4367,32 @@ int extent_writepages(struct address_space *mapping,
         return ret;
 }
 
-int extent_readpages(struct address_space *mapping, struct list_head *pages,
-                     unsigned nr_pages)
+void extent_readahead(struct readahead_control *rac)
 {
         struct bio *bio = NULL;
         unsigned long bio_flags = 0;
         struct page *pagepool[16];
         struct extent_map *em_cached = NULL;
-        int nr = 0;
         u64 prev_em_start = (u64)-1;
+        int nr;
 
-        while (!list_empty(pages)) {
-                u64 contig_end = 0;
+        while ((nr = readahead_page_batch(rac, pagepool))) {
+                u64 contig_start = page_offset(pagepool[0]);
+                u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
 
-                for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
-                        struct page *page = lru_to_page(pages);
+                ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
 
-                        prefetchw(&page->flags);
-                        list_del(&page->lru);
-                        if (add_to_page_cache_lru(page, mapping, page->index,
-                                                readahead_gfp_mask(mapping))) {
-                                put_page(page);
-                                break;
-                        }
-
-                        pagepool[nr++] = page;
-                        contig_end = page_offset(page) + PAGE_SIZE - 1;
-                }
-
-                if (nr) {
-                        u64 contig_start = page_offset(pagepool[0]);
-
-                        ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
-
-                        contiguous_readpages(pagepool, nr, contig_start,
-                                contig_end, &em_cached, &bio, &bio_flags,
-                                &prev_em_start);
-                }
+                contiguous_readpages(pagepool, nr, contig_start, contig_end,
+                                &em_cached, &bio, &bio_flags, &prev_em_start);
         }
 
         if (em_cached)
                 free_extent_map(em_cached);
 
-        if (bio)
-                return submit_one_bio(bio, 0, bio_flags);
-        return 0;
+        if (bio) {
+                if (submit_one_bio(bio, 0, bio_flags))
+                        return;
+        }
 }
 
 /*
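
The hunk above covers only the extent_io side; the other two changed files in the diffstat are not shown, but for such a conversion they are typically the header declaration and the address_space_operations wiring. The following is a minimal sketch of that wiring, not a quote from this commit; the btrfs_readahead wrapper and btrfs_aops names follow the usual btrfs convention and are assumptions here.

        /*
         * Sketch only (assumed names, not quoted from this commit): the aops
         * side of a readpages -> readahead conversion.  The wrapper simply
         * hands the readahead_control to the extent code shown above.
         */
        #include <linux/fs.h>
        #include <linux/pagemap.h>
        #include "extent_io.h"

        static void btrfs_readahead(struct readahead_control *rac)
        {
                extent_readahead(rac);
        }

        static const struct address_space_operations btrfs_aops = {
                .readahead      = btrfs_readahead,      /* replaces .readpages */
                /* remaining callbacks unchanged and omitted in this sketch */
        };

In the new loop, readahead_page_batch() fills the caller's pagepool[] directly from the readahead_control with pages the VFS has already added to the page cache, so the old add_to_page_cache_lru()/lru_to_page() bookkeeping disappears, and since readahead is best effort the function now returns void rather than an error code.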