mm/readahead: make do_page_cache_ra take a readahead_control

Rename __do_page_cache_readahead() to do_page_cache_ra() and call it
directly from ondemand_readahead() instead of indirecting via ra_submit().
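
The calling convention changes from passing (mapping, file, index) as separate arguments to passing a struct readahead_control. As a sketch of the before/after at a call site (argument names are illustrative, taken from the diff below):

	/* Before: mapping, file and index passed individually. */
	__do_page_cache_readahead(mapping, file, index, nr_to_read, lookahead_size);

	/* After: an on-stack readahead_control carries file, mapping and index. */
	DEFINE_READAHEAD(ractl, file, mapping, index);
	do_page_cache_ra(&ractl, nr_to_read, lookahead_size);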

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Link: https://lkml.kernel.org/r/20200903140844.14194-5-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8238287ead (parent 73bb49da50)
Author: Matthew Wilcox (Oracle), 2020-10-15 20:06:17 -07:00
Committed by: Linus Torvalds
2 changed files with 20 additions and 19 deletions

mm/internal.h

@@ -51,18 +51,17 @@ void unmap_page_range(struct mmu_gather *tlb,
 void force_page_cache_readahead(struct address_space *, struct file *,
 		pgoff_t index, unsigned long nr_to_read);
-void __do_page_cache_readahead(struct address_space *, struct file *,
-		pgoff_t index, unsigned long nr_to_read,
-		unsigned long lookahead_size);
+void do_page_cache_ra(struct readahead_control *,
+		unsigned long nr_to_read, unsigned long lookahead_size);
 
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
 static inline void ra_submit(struct file_ra_state *ra,
-		struct address_space *mapping, struct file *filp)
+		struct address_space *mapping, struct file *file)
 {
-	__do_page_cache_readahead(mapping, filp,
-			ra->start, ra->size, ra->async_size);
+	DEFINE_READAHEAD(ractl, file, mapping, ra->start);
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
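
ra_submit() now only packages the file_ra_state window into an on-stack readahead_control and forwards it. DEFINE_READAHEAD comes from the parent commit in this series; a rough sketch of its shape (field names inferred from the uses of ractl->mapping and ractl._index in this patch; see <linux/pagemap.h> for the real definition):

	/* Approximate shape of the helper, not copied from this patch. */
	#define DEFINE_READAHEAD(ractl, f, m, i)		\
		struct readahead_control ractl = {		\
			.file = f,				\
			.mapping = m,				\
			._index = i,				\
		}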

mm/readahead.c

@@ -241,17 +241,16 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk. It allocates
+ * do_page_cache_ra() actually reads a chunk of disk. It allocates
  * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  */
-void __do_page_cache_readahead(struct address_space *mapping,
-		struct file *file, pgoff_t index, unsigned long nr_to_read,
-		unsigned long lookahead_size)
+void do_page_cache_ra(struct readahead_control *ractl,
+		unsigned long nr_to_read, unsigned long lookahead_size)
 {
-	DEFINE_READAHEAD(ractl, file, mapping, index);
-	struct inode *inode = mapping->host;
+	struct inode *inode = ractl->mapping->host;
+	unsigned long index = readahead_index(ractl);
 	loff_t isize = i_size_read(inode);
 	pgoff_t end_index;	/* The last page we want to read */
 
@@ -265,7 +264,7 @@ void __do_page_cache_readahead(struct address_space *mapping,
 	if (nr_to_read > end_index - index)
 		nr_to_read = end_index - index + 1;
 
-	page_cache_ra_unbounded(&ractl, nr_to_read, lookahead_size);
+	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
 }
 
 /*
@@ -273,10 +272,11 @@ void __do_page_cache_readahead(struct address_space *mapping,
  * memory at once.
  */
 void force_page_cache_readahead(struct address_space *mapping,
-		struct file *filp, pgoff_t index, unsigned long nr_to_read)
+		struct file *file, pgoff_t index, unsigned long nr_to_read)
 {
+	DEFINE_READAHEAD(ractl, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
-	struct file_ra_state *ra = &filp->f_ra;
+	struct file_ra_state *ra = &file->f_ra;
 	unsigned long max_pages;
 
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
@@ -294,7 +294,7 @@ void force_page_cache_readahead(struct address_space *mapping,
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		__do_page_cache_readahead(mapping, filp, index, this_chunk, 0);
+		do_page_cache_ra(&ractl, this_chunk, 0);
 
 		index += this_chunk;
 		nr_to_read -= this_chunk;
@@ -432,10 +432,11 @@ static int try_context_readahead(struct address_space *mapping,
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
 static void ondemand_readahead(struct address_space *mapping,
-		struct file_ra_state *ra, struct file *filp,
+		struct file_ra_state *ra, struct file *file,
 		bool hit_readahead_marker, pgoff_t index,
 		unsigned long req_size)
 {
+	DEFINE_READAHEAD(ractl, file, mapping, index);
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
 	unsigned long add_pages;
@@ -516,7 +517,7 @@ static void ondemand_readahead(struct address_space *mapping,
 		 * standalone, small random read
 		 * Read as is, and do not pollute the readahead state.
 		 */
-		__do_page_cache_readahead(mapping, filp, index, req_size, 0);
+		do_page_cache_ra(&ractl, req_size, 0);
 		return;
 	}
 
 initial_readahead:
@@ -542,7 +543,8 @@ static void ondemand_readahead(struct address_space *mapping,
 		}
 	}
 
-	ra_submit(ra, mapping, filp);
+	ractl._index = ra->start;
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 }
 
 /**