fs/mpage.c: factor clean_buffers() out of __mpage_writepage()
__mpage_writepage() is over 200 lines long, has 20 local variables and four goto labels, and could desperately use simplification.  Splitting clean_buffers() out as a helper function improves matters a little, removing 20+ lines from it.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dheeraj Reddy <dheeraj.reddy@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 90768eee45
parent 1b938c0827
fs/mpage.c | 54
@@ -439,6 +439,35 @@ struct mpage_data {
 	unsigned use_writepage;
 };
 
+/*
+ * We have our BIO, so we can now mark the buffers clean.  Make
+ * sure to only clean buffers which we know we'll be writing.
+ */
+static void clean_buffers(struct page *page, unsigned first_unmapped)
+{
+	unsigned buffer_counter = 0;
+	struct buffer_head *bh, *head;
+	if (!page_has_buffers(page))
+		return;
+	head = page_buffers(page);
+	bh = head;
+
+	do {
+		if (buffer_counter++ == first_unmapped)
+			break;
+		clear_buffer_dirty(bh);
+		bh = bh->b_this_page;
+	} while (bh != head);
+
+	/*
+	 * we cannot drop the bh if the page is not uptodate or a concurrent
+	 * readpage would fail to serialize with the bh and it would read from
+	 * disk before we reach the platter.
+	 */
+	if (buffer_heads_over_limit && PageUptodate(page))
+		try_to_free_buffers(page);
+}
+
 static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 		      void *data)
 {
@@ -591,30 +620,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 		goto alloc_new;
 	}
 
-	/*
-	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
-	 * sure to only clean buffers which we know we'll be writing.
-	 */
-	if (page_has_buffers(page)) {
-		struct buffer_head *head = page_buffers(page);
-		struct buffer_head *bh = head;
-		unsigned buffer_counter = 0;
-
-		do {
-			if (buffer_counter++ == first_unmapped)
-				break;
-			clear_buffer_dirty(bh);
-			bh = bh->b_this_page;
-		} while (bh != head);
-
-		/*
-		 * we cannot drop the bh if the page is not uptodate
-		 * or a concurrent readpage would fail to serialize with the bh
-		 * and it would read from disk before we reach the platter.
-		 */
-		if (buffer_heads_over_limit && PageUptodate(page))
-			try_to_free_buffers(page);
-	}
+	clean_buffers(page, first_unmapped);
 
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
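For readers unfamiliar with the pattern: a page's buffer_heads are linked into a circular list through b_this_page, so the loop in clean_buffers() stops either after first_unmapped buffers or once it wraps back to the head. Below is a minimal, self-contained C sketch of that bounded circular walk; the struct and function names are stand-ins for illustration only, not kernel code.

/* Illustration only: the same bounded walk over a circular singly linked
 * list that clean_buffers() performs, with a hypothetical struct standing
 * in for struct buffer_head and `next` playing the role of b_this_page.
 */
#include <stdio.h>

struct fake_bh {
	int dirty;
	struct fake_bh *next;	/* circular: last entry points back to head */
};

static void clean_first_n(struct fake_bh *head, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct fake_bh *bh = head;

	if (!head)		/* mirrors the !page_has_buffers() early return */
		return;

	do {
		if (buffer_counter++ == first_unmapped)
			break;	/* only clean buffers we know we'll write */
		bh->dirty = 0;	/* stands in for clear_buffer_dirty(bh) */
		bh = bh->next;
	} while (bh != head);	/* stop once the walk wraps back to the head */
}

int main(void)
{
	/* Four "buffers" on one "page", linked into a circle. */
	struct fake_bh b[4] = {
		{ 1, &b[1] }, { 1, &b[2] }, { 1, &b[3] }, { 1, &b[0] },
	};

	clean_first_n(&b[0], 2);	/* clean only the first two */

	for (int i = 0; i < 4; i++)
		printf("buffer %d: dirty=%d\n", i, b[i].dirty);
	return 0;
}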