f2fs: expose f2fs_mpage_readpages
This patch implements f2fs_mpage_readpages() to allow further optimization of
the encryption support. The basic code was taken from fs/mpage.c and simplified
by relying on the fact that block_size is equal to page_size in f2fs.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit f1e8866016
parent 26d815ad75

 fs/f2fs/data.c | 157
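For illustration only, and not part of the commit: because f2fs keeps block_size
equal to page_size, the per-page block loop from fs/mpage.c collapses into a 1:1
page-to-block mapping, which is the simplification the commit message refers to.
The standalone userspace sketch below walks through that index arithmetic; the
4KB page size, the example block numbers, and the SECTOR_FROM_BLOCK-style shift
are assumptions made only for this sketch.

/* Sketch of the block_size == page_size simplification (not kernel code). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12                        /* assume 4KB pages */
#define BLOCK_SHIFT     PAGE_SHIFT                /* f2fs: one block per page */
#define SECTORS_PER_BLK (1 << (BLOCK_SHIFT - 9))  /* 512-byte sectors per block */

int main(void)
{
        uint64_t page_index = 42;        /* page->index in the page cache */

        /* With one block per page, the file-relative block is simply the
         * page index (cf. block_in_file = (sector_t)page->index above). */
        uint64_t block_in_file = page_index;

        /* Suppose the lookup mapped logical block 40 onward to physical
         * block 1000 onward (map.m_lblk = 40, map.m_pblk = 1000). */
        uint64_t m_lblk = 40, m_pblk = 1000;
        uint64_t block_nr = m_pblk + block_in_file - m_lblk;

        /* Equivalent of a SECTOR_FROM_BLOCK()-style conversion when the
         * bio's starting sector is filled in. */
        uint64_t sector = block_nr * SECTORS_PER_BLK;

        printf("page %llu -> file block %llu -> disk block %llu -> sector %llu\n",
               (unsigned long long)page_index,
               (unsigned long long)block_in_file,
               (unsigned long long)block_nr,
               (unsigned long long)sector);
        return 0;
}

With the assumed values this prints sector 8016 for disk block 1002, mirroring
how the read path below derives the starting sector for each page it adds to a
bio.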
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/prefetch.h>
 #include <linux/uio.h>
+#include <linux/cleancache.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -47,6 +48,30 @@ static void f2fs_read_end_io(struct bio *bio, int err)
 	bio_put(bio);
 }
 
+/*
+ * I/O completion handler for multipage BIOs.
+ * copied from fs/mpage.c
+ */
+static void mpage_end_io(struct bio *bio, int err)
+{
+	struct bio_vec *bv;
+	int i;
+
+	bio_for_each_segment_all(bv, bio, i) {
+		struct page *page = bv->bv_page;
+
+		if (!err) {
+			SetPageUptodate(page);
+		} else {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		}
+		unlock_page(page);
+	}
+
+	bio_put(bio);
+}
+
 static void f2fs_write_end_io(struct bio *bio, int err)
 {
 	struct f2fs_sb_info *sbi = bio->bi_private;
@@ -1349,6 +1374,133 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 				start, len, get_data_block_fiemap);
 }
 
+/*
+ * This function was originally taken from fs/mpage.c, and customized for f2fs.
+ * Major change was from block_size == page_size in f2fs by default.
+ */
+static int f2fs_mpage_readpages(struct address_space *mapping,
+			struct list_head *pages, struct page *page,
+			unsigned nr_pages)
+{
+	struct bio *bio = NULL;
+	unsigned page_idx;
+	sector_t last_block_in_bio = 0;
+	struct inode *inode = mapping->host;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	sector_t block_in_file;
+	sector_t last_block;
+	sector_t last_block_in_file;
+	sector_t block_nr;
+	struct block_device *bdev = inode->i_sb->s_bdev;
+	struct f2fs_map_blocks map;
+
+	map.m_pblk = 0;
+	map.m_lblk = 0;
+	map.m_len = 0;
+	map.m_flags = 0;
+
+	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+
+		prefetchw(&page->flags);
+		if (pages) {
+			page = list_entry(pages->prev, struct page, lru);
+			list_del(&page->lru);
+			if (add_to_page_cache_lru(page, mapping,
+						page->index, GFP_KERNEL))
+				goto next_page;
+		}
+
+		block_in_file = (sector_t)page->index;
+		last_block = block_in_file + nr_pages;
+		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+								blkbits;
+		if (last_block > last_block_in_file)
+			last_block = last_block_in_file;
+
+		/*
+		 * Map blocks using the previous result first.
+		 */
+		if ((map.m_flags & F2FS_MAP_MAPPED) &&
+				block_in_file > map.m_lblk &&
+				block_in_file < (map.m_lblk + map.m_len))
+			goto got_it;
+
+		/*
+		 * Then do more f2fs_map_blocks() calls until we are
+		 * done with this page.
+		 */
+		map.m_flags = 0;
+
+		if (block_in_file < last_block) {
+			map.m_lblk = block_in_file;
+			map.m_len = last_block - block_in_file;
+
+			if (f2fs_map_blocks(inode, &map, 0, false))
+				goto set_error_page;
+		}
+got_it:
+		if ((map.m_flags & F2FS_MAP_MAPPED)) {
+			block_nr = map.m_pblk + block_in_file - map.m_lblk;
+			SetPageMappedToDisk(page);
+
+			if (!PageUptodate(page) && !cleancache_get_page(page)) {
+				SetPageUptodate(page);
+				goto confused;
+			}
+		} else {
+			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+			SetPageUptodate(page);
+			unlock_page(page);
+			goto next_page;
+		}
+
+		/*
+		 * This page will go to BIO.  Do we need to send this
+		 * BIO off first?
+		 */
+		if (bio && (last_block_in_bio != block_nr - 1)) {
+submit_and_realloc:
+			submit_bio(READ, bio);
+			bio = NULL;
+		}
+		if (bio == NULL) {
+			bio = bio_alloc(GFP_KERNEL,
+				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+			if (!bio)
+				goto set_error_page;
+			bio->bi_bdev = bdev;
+			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
+			bio->bi_end_io = mpage_end_io;
+			bio->bi_private = NULL;
+		}
+
+		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+			goto submit_and_realloc;
+
+		last_block_in_bio = block_nr;
+		goto next_page;
+set_error_page:
+		SetPageError(page);
+		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		unlock_page(page);
+		goto next_page;
+confused:
+		if (bio) {
+			submit_bio(READ, bio);
+			bio = NULL;
+		}
+		unlock_page(page);
+next_page:
+		if (pages)
+			page_cache_release(page);
+	}
+	BUG_ON(pages && !list_empty(pages));
+	if (bio)
+		submit_bio(READ, bio);
+	return 0;
+}
+
 static int f2fs_read_data_page(struct file *file, struct page *page)
 {
 	struct inode *inode = page->mapping->host;
@@ -1360,8 +1512,7 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
 	if (f2fs_has_inline_data(inode))
 		ret = f2fs_read_inline_data(inode, page);
 	if (ret == -EAGAIN)
-		ret = mpage_readpage(page, get_data_block);
-
+		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
 	return ret;
 }
 
@@ -1375,7 +1526,7 @@ static int f2fs_read_data_pages(struct file *file,
 	if (f2fs_has_inline_data(inode))
 		return 0;
 
-	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
+	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
 }
 
 int do_write_data_page(struct page *page, struct f2fs_io_info *fio)