readahead: combine file_ra_state.prev_index/prev_offset into prev_pos
Combine the file_ra_state members "unsigned long prev_index" and "unsigned int prev_offset" into a single "loff_t prev_pos". It is more consistent and better supports huge files.

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]

Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Committed by: Linus Torvalds
Parent: 0bb7ba6b9c
Commit: f4e6b498d6
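To make the change concrete, below is a minimal user-space sketch (not the kernel implementation) of how a single 64-bit byte position can stand in for the old page-index/in-page-offset pair. The helper names, the loff_t_demo typedef, and the 4 KiB PAGE_SHIFT are assumptions for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                     /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Stand-in for the kernel's loff_t: a signed 64-bit byte position.
     * Both of the old fields can be derived from it, and it cannot
     * overflow on huge files the way a 32-bit page index could. */
    typedef int64_t loff_t_demo;

    static uint64_t prev_index(loff_t_demo prev_pos)
    {
            /* page containing the last visited byte */
            return (uint64_t)prev_pos >> PAGE_SHIFT;
    }

    static unsigned int prev_offset(loff_t_demo prev_pos)
    {
            /* byte offset within that page */
            return (unsigned int)(prev_pos & (PAGE_SIZE - 1));
    }

    int main(void)
    {
            loff_t_demo prev_pos = (5LL << PAGE_SHIFT) + 123; /* byte 123 of page 5 */

            printf("index=%llu offset=%u\n",
                   (unsigned long long)prev_index(prev_pos),
                   prev_offset(prev_pos));
            return 0;
    }

Running this prints "index=5 offset=123", recovering both old values from the combined position.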
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);

@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int max;	/* max readahead pages */
-	int sequential;
-
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+	int max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
+	int sequential;

 	/*
 	 * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}

+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
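For readers following the last two hunks: after the patch, the sequential-read check derives the previous page index from prev_pos on demand instead of keeping a separate prev_index field. Below is a hedged, self-contained rendering of that test, with types and constants simplified (PAGE_CACHE_SHIFT is the kernel's name; the value 12 is an assumption):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_CACHE_SHIFT 12   /* assumed page-cache page size: 4 KiB */

    /* Mirrors the patched logic in ondemand_readahead(): a read is
     * treated as sequential if it starts on, or one page after, the
     * last page touched by the previous request, or if it is large
     * enough to exceed the readahead window anyway. */
    static bool is_sequential(int64_t prev_pos, uint64_t offset,
                              unsigned long req_size, unsigned long max)
    {
            uint64_t prev_offset = (uint64_t)prev_pos >> PAGE_CACHE_SHIFT;

            return offset - prev_offset <= 1UL || req_size > max;
    }

Note that the unsigned subtraction also handles the initial prev_pos of -1 correctly: the wrapped difference is far greater than 1, so the first read of a file is never misclassified as sequential.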