readahead: record mmap read-around states in file_ra_state

Mmap read-around now shares the same code style and data structure as
the readahead code.

This also removes do_page_cache_readahead().  Its last user, mmap
read-around, has been changed to call ra_submit().

The no-readahead-if-congested logic is dropped along the way: users are
quite sensitive to slow-loading executables, so it would be unfavorable
to disable mmap read-around on a congested queue.
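
The "states" being recorded are the existing readahead window fields in
struct file_ra_state. For orientation, roughly what that structure looks
like in include/linux/fs.h of this era (a sketch for context, not part of
the patch):

	struct file_ra_state {
		pgoff_t start;			/* where readahead started */
		unsigned int size;		/* # of readahead pages */
		unsigned int async_size;	/* do asynchronous readahead when
						   there are only # of pages ahead */
		unsigned int ra_pages;		/* maximum readahead window */
		...
	};

Read-around now fills in start/size/async_size and hands the struct to
ra_submit(), the same way ordinary readahead does.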

[akpm@linux-foundation.org: coding-style fixes]
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d30a11004e (parent 2fad6f5dee)
Author: Wu Fengguang, 2009-06-16 15:31:30 -07:00
Committed-by: Linus Torvalds
3 changed files with 12 additions and 28 deletions

--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -1178,8 +1178,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
@@ -1197,6 +1195,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 			  unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+		       struct address_space *mapping,
+		       struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

--- a/mm/filemap.c
+++ b/mm/filemap.c

@@ -1488,13 +1488,15 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	if (ra->mmap_miss > MMAP_LOTSAMISS)
 		return;
 
+	/*
+	 * mmap read-around
+	 */
 	ra_pages = max_sane_readahead(ra->ra_pages);
 	if (ra_pages) {
-		pgoff_t start = 0;
-
-		if (offset > ra_pages / 2)
-			start = offset - ra_pages / 2;
-		do_page_cache_readahead(mapping, file, start, ra_pages);
+		ra->start = max_t(long, 0, offset - ra_pages/2);
+		ra->size = ra_pages;
+		ra->async_size = 0;
+		ra_submit(ra, mapping, file);
 	}
 }
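
One subtlety in the new window computation: offset and ra_pages are
unsigned, so offset - ra_pages/2 would wrap around for faults near the
start of the file. The max_t(long, 0, ...) forces a signed comparison, so
the window start clamps to page 0 instead. A tiny userspace demonstration
of the same idiom (hypothetical values, not kernel code):

	#include <stdio.h>

	/* userspace stand-in for the kernel's max_t(type, a, b) macro */
	#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		unsigned long offset = 3;	/* faulting page index, near file start */
		unsigned long ra_pages = 32;	/* read-around window size, in pages */

		/* plain unsigned arithmetic wraps: 3 - 16 becomes a huge index */
		unsigned long wrapped = offset - ra_pages / 2;

		/* the patch's signed clamp starts the window at page 0 instead */
		long start = max_t(long, 0, offset - ra_pages / 2);

		printf("wrapped: %lu, clamped start: %ld\n", wrapped, start);
		return 0;
	}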

--- a/mm/readahead.c
+++ b/mm/readahead.c

@@ -133,15 +133,12 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 }
 
 /*
- * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
- *
- * do_page_cache_readahead() returns -1 if it encountered request queue
- * congestion.
 */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -231,22 +228,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	return ret;
 }
 
-/*
- * This version skips the IO if the queue is read-congested, and will tell the
- * block layer to abandon the readahead if request allocation would block.
- *
- * force_page_cache_readahead() will ignore queue congestion and will block on
- * request queues.
- */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read)
-{
-	if (bdi_read_congested(mapping->backing_dev_info))
-		return -1;
-
-	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
-}
-
 /*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
@@ -260,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static unsigned long ra_submit(struct file_ra_state *ra,
+unsigned long ra_submit(struct file_ra_state *ra,
 		struct address_space *mapping, struct file *filp)
 {
 	int actual;
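
The hunk ends at the top of ra_submit(); for orientation, the remainder of
the function (untouched by this patch apart from the dropped static, and
shown here as a sketch of the 2.6.31-era source) simply forwards the
recorded window to __do_page_cache_readahead():

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}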