Btrfs: remove bio_flags which indicates a meta block of log-tree
Since both committing a transaction and writing the log tree do plugging on metadata IO, we can unify them to use %sync_writers and benefit both cases, instead of checking bio_flags while writing meta blocks of the log tree.

We can remove this bio_flags because, in order to write dirty blocks, the log tree also uses btrfs_write_marked_extents(), inside which %sync_writers has been enabled; therefore every write goes in a synchronous way, and so does checksumming.

Please also note that bio_flags is applied per context while %sync_writers is applied per inode, so this might incur some overhead, i.e.:

1) while the log tree is flushing its dirty blocks via btrfs_write_marked_extents(), %sync_writers is increased by one;

2) in the meantime, any writeback that happens on btrfs's metadata inode also goes synchronously.

However, AFAICS the overhead is not a big one, while the win is that we unify the two places that need the synchronous way and remove a special hack/flag.

This removes the bio_flags related stuff for writing the log tree.

Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
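To make the reasoning above concrete, here is a minimal user-space sketch of the policy the message describes (not the kernel code; struct meta_inode and these helpers are simplified stand-ins): while a flush of marked extents is in flight, the per-inode sync_writers counter is raised, so check_async_write() picks the synchronous path and checksumming is not deferred to the async workers, regardless of whether the blocks belong to the log tree.

/*
 * Illustrative sketch only: these names and types are simplified
 * stand-ins, not btrfs's real structures.
 */
#include <stdatomic.h>
#include <stdio.h>

struct meta_inode {
	atomic_int sync_writers;	/* > 0 while a synchronous flush is running */
};

/* post-patch policy: only the per-inode counter decides, no per-bio flag */
static int check_async_write(struct meta_inode *bi)
{
	if (atomic_load(&bi->sync_writers))
		return 0;		/* checksum inline, submit synchronously */
	return 1;			/* otherwise offload checksumming to workers */
}

/* roughly what btrfs_write_marked_extents() does around its writeback loop */
static void write_marked_extents(struct meta_inode *btree_inode)
{
	atomic_fetch_add(&btree_inode->sync_writers, 1);
	printf("during flush, async = %d\n", check_async_write(btree_inode));
	/* ... write back every range marked dirty by the transaction or log ... */
	atomic_fetch_sub(&btree_inode->sync_writers, 1);
}

int main(void)
{
	struct meta_inode btree_inode = { 0 };

	printf("when idle, async = %d\n", check_async_write(&btree_inode));
	write_marked_extents(&btree_inode);
	return 0;
}

With only the counter consulted, the EXTENT_BIO_TREE_LOG hint becomes redundant, which is what the diff below removes.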
parent 6300463b14
commit 18fdc67900
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1005,12 +1005,10 @@ static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
 	return ret;
 }
 
-static int check_async_write(struct btrfs_inode *bi, unsigned long bio_flags)
+static int check_async_write(struct btrfs_inode *bi)
 {
 	if (atomic_read(&bi->sync_writers))
 		return 0;
-	if (bio_flags & EXTENT_BIO_TREE_LOG)
-		return 0;
 #ifdef CONFIG_X86
 	if (static_cpu_has(X86_FEATURE_XMM4_2))
 		return 0;
@@ -1024,7 +1022,7 @@ static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
 {
 	struct inode *inode = private_data;
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	int async = check_async_write(BTRFS_I(inode), bio_flags);
+	int async = check_async_write(BTRFS_I(inode));
 	blk_status_t ret;
 
 	if (bio_op(bio) != REQ_OP_WRITE) {
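For orientation, after the first hunk above check_async_write() presumably reads as follows; the closing #endif and the final return sit outside the hunk's trailing context and are assumed unchanged:

static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;	/* this return and the #endif are outside the hunk, assumed unchanged */
}

A return value of 1 means the bio's checksumming may be offloaded to the async workers; with the EXTENT_BIO_TREE_LOG test gone, only an elevated sync_writers count or hardware CRC32C support forces the synchronous path.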
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -109,7 +109,6 @@ struct extent_page_data {
 	struct bio *bio;
 	struct extent_io_tree *tree;
 	get_extent_t *get_extent;
-	unsigned long bio_flags;
 
 	/* tells writepage not to lock the state bits for this range
 	 * it still does the unlocking
@@ -3715,7 +3714,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	u64 offset = eb->start;
 	u32 nritems;
 	unsigned long i, num_pages;
-	unsigned long bio_flags = 0;
 	unsigned long start, end;
 	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
 	int ret = 0;
@@ -3723,8 +3721,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
 	num_pages = num_extent_pages(eb->start, eb->len);
 	atomic_set(&eb->io_pages, num_pages);
-	if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
-		bio_flags = EXTENT_BIO_TREE_LOG;
 
 	/* set btree blocks beyond nritems with 0 to avoid stale content. */
 	nritems = btrfs_header_nritems(eb);
@@ -3751,8 +3747,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 					 p, offset >> 9, PAGE_SIZE, 0, bdev,
 					 &epd->bio,
 					 end_bio_extent_buffer_writepage,
-					 0, epd->bio_flags, bio_flags, false);
-		epd->bio_flags = bio_flags;
+					 0, 0, 0, false);
 		if (ret) {
 			set_btree_ioerr(p);
 			if (PageWriteback(p))
@@ -3789,7 +3784,6 @@ int btree_write_cache_pages(struct address_space *mapping,
 		.tree = tree,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 	int ret = 0;
 	int done = 0;
@@ -4062,7 +4056,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 	if (epd->bio) {
 		int ret;
 
-		ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
+		ret = submit_one_bio(epd->bio, 0, 0);
 		BUG_ON(ret < 0); /* -ENOMEM */
 		epd->bio = NULL;
 	}
@@ -4085,7 +4079,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		.get_extent = get_extent,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 
 	ret = __extent_writepage(page, wbc, &epd);
@@ -4110,7 +4103,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		.get_extent = get_extent,
 		.extent_locked = 1,
 		.sync_io = mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 	struct writeback_control wbc_writepages = {
 		.sync_mode = mode,
@@ -4150,7 +4142,6 @@ int extent_writepages(struct extent_io_tree *tree,
 		.get_extent = get_extent,
 		.extent_locked = 0,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
-		.bio_flags = 0,
 	};
 
 	ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd,
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -33,7 +33,6 @@
  * type for this bio
  */
 #define EXTENT_BIO_COMPRESSED 1
-#define EXTENT_BIO_TREE_LOG 2
 #define EXTENT_BIO_FLAG_SHIFT 16
 
 /* these are bit numbers for test/set bit */