mm/filemap: fold ra_submit into do_sync_mmap_readahead
Fold ra_submit() into its last remaining user and pass the
readahead_control struct to both do_page_cache_ra() and
page_cache_sync_ra().

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Biggers <ebiggers@google.com>
Link: https://lkml.kernel.org/r/20200903140844.14194-9-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fefa7c478f
commit db660d4625

 mm/filemap.c  | 10 +++++-----
 mm/internal.h | 10 ----------
 2 files changed, 5 insertions(+), 15 deletions(-)
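For context (not part of this patch): the readahead_control argument that
page_cache_sync_ra() and do_page_cache_ra() take is built at the call site
with DEFINE_READAHEAD(). A rough sketch of those definitions as they stood
in include/linux/pagemap.h around this series — abridged and paraphrased,
so treat the exact fields as illustrative rather than authoritative:

	/* Abridged sketch of include/linux/pagemap.h around this series. */
	struct readahead_control {
		struct file *file;
		struct address_space *mapping;
	/* private: use the readahead_* accessors instead */
		pgoff_t _index;			/* first page to read */
		unsigned int _nr_pages;		/* number of pages read */
		unsigned int _batch_count;
	};

	#define DEFINE_READAHEAD(rac, f, m, i)			\
		struct readahead_control rac = {		\
			.file = f,				\
			.mapping = m,				\
			._index = i,				\
		}

Because DEFINE_READAHEAD() declares a local variable, hoisting it to the top
of do_sync_mmap_readahead() lets one ractl serve both readahead paths below.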
diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2588,8 +2588,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct file *file = vmf->vma->vm_file;
 	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
-	pgoff_t offset = vmf->pgoff;
 	unsigned int mmap_miss;
 
 	/* If we don't want any read-ahead, don't bother */
@@ -2600,8 +2600,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 
 	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-		page_cache_sync_readahead(mapping, ra, file, offset,
-					  ra->ra_pages);
+		page_cache_sync_ra(&ractl, ra, ra->ra_pages);
 		return fpin;
 	}
 
@@ -2621,10 +2620,11 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	 * mmap read-around
 	 */
 	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
+	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
 	ra->size = ra->ra_pages;
 	ra->async_size = ra->ra_pages / 4;
-	ra_submit(ra, mapping, file);
+	ractl._index = ra->start;
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 	return fpin;
 }
 
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -59,16 +59,6 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 	force_page_cache_ra(&ractl, nr_to_read);
 }
 
-/*
- * Submit IO for the read-ahead request in file_ra_state.
- */
-static inline void ra_submit(struct file_ra_state *ra,
-		struct address_space *mapping, struct file *file)
-{
-	DEFINE_READAHEAD(ractl, file, mapping, ra->start);
-	do_page_cache_ra(&ractl, ra->size, ra->async_size);
-}
-
 struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);
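Net effect in mm/filemap.c: with ra_submit() folded away, the read-around
path submits I/O through the ractl declared at the top of
do_sync_mmap_readahead(), updating only its ._index first. Quoting the
post-patch code from the hunks above:

	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ractl._index = ra->start;	/* point ractl at the read-around window */
	do_page_cache_ra(&ractl, ra->size, ra->async_size);
	return fpin;

Reusing the single ractl avoids constructing a second readahead_control,
which is what ra_submit() did internally via its own DEFINE_READAHEAD().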