Merge branch 'for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
 "A couple of small fixes"

* 'for-linus-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: check prepare_uptodate_page() error code earlier
  Btrfs: check for empty bitmap list in setup_cluster_bitmaps
  btrfs: fix misleading warning when space cache failed to load
  Btrfs: fix transaction handle leak in balance
  Btrfs: fix unprotected list move from unused_bgs to deleted_bgs list
commit fc315e3e5c

@@ -10480,11 +10480,15 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		 * until transaction commit to do the actual discard.
 		 */
 		if (trimming) {
-			WARN_ON(!list_empty(&block_group->bg_list));
-			spin_lock(&trans->transaction->deleted_bgs_lock);
+			spin_lock(&fs_info->unused_bgs_lock);
+			/*
+			 * A concurrent scrub might have added us to the list
+			 * fs_info->unused_bgs, so use a list_move operation
+			 * to add the block group to the deleted_bgs list.
+			 */
 			list_move(&block_group->bg_list,
 				  &trans->transaction->deleted_bgs);
-			spin_unlock(&trans->transaction->deleted_bgs_lock);
+			spin_unlock(&fs_info->unused_bgs_lock);
 			btrfs_get_block_group(block_group);
 		}
 end_trans:

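The hunk above (together with the transaction.c and transaction.h hunks further down) is the "fix unprotected list move" change: a concurrent scrub can re-add the block group to fs_info->unused_bgs, so moving it onto trans->transaction->deleted_bgs must be serialized by the lock that already protects the source list, fs_info->unused_bgs_lock, and the dedicated deleted_bgs_lock goes away. The sketch below is a plain userspace analogue of that pattern, not btrfs code: a toy circular list and a pthread mutex stand in for struct list_head and spinlock_t, and the move is done entirely under the source list's lock.

/* Userspace sketch of "move under the source list's lock"; toy list, not
 * the kernel list API. */
#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *prev, *next;       /* circular, with a dedicated head */
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

/* Like list_move(): unlink from whatever list the node is on, then append. */
static void list_move_tail(struct node *n, struct node *head)
{
        list_del(n);
        list_add_tail(n, head);
}

static struct node unused_bgs, deleted_bgs;
static pthread_mutex_t unused_bgs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every writer of bg's list linkage takes unused_bgs_lock, so a concurrent
 * thread re-adding bg to unused_bgs cannot race with this move. */
static void delete_unused_bg(struct node *bg)
{
        pthread_mutex_lock(&unused_bgs_lock);
        list_move_tail(bg, &deleted_bgs);
        pthread_mutex_unlock(&unused_bgs_lock);
}

int main(void)
{
        struct node bg;

        list_init(&unused_bgs);
        list_init(&deleted_bgs);
        list_init(&bg);
        list_add_tail(&bg, &unused_bgs);        /* "discovered as unused" */
        delete_unused_bg(&bg);
        printf("bg is now on deleted_bgs: %d\n", deleted_bgs.next == &bg);
        return 0;
}
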
@@ -1291,7 +1291,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
  * on error we return an unlocked page and the error value
  * on success we return a locked page and 0
  */
-static int prepare_uptodate_page(struct page *page, u64 pos,
+static int prepare_uptodate_page(struct inode *inode,
+				 struct page *page, u64 pos,
 				 bool force_uptodate)
 {
 	int ret = 0;

@@ -1306,6 +1307,10 @@ static int prepare_uptodate_page(struct page *page, u64 pos,
 			unlock_page(page);
 			return -EIO;
 		}
+		if (page->mapping != inode->i_mapping) {
+			unlock_page(page);
+			return -EAGAIN;
+		}
 	}
 	return 0;
 }

@@ -1324,6 +1329,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 	int faili;
 
 	for (i = 0; i < num_pages; i++) {
+again:
 		pages[i] = find_or_create_page(inode->i_mapping, index + i,
 					       mask | __GFP_WRITE);
 		if (!pages[i]) {

@@ -1333,13 +1339,17 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 		}
 
 		if (i == 0)
-			err = prepare_uptodate_page(pages[i], pos,
+			err = prepare_uptodate_page(inode, pages[i], pos,
 						    force_uptodate);
-		if (i == num_pages - 1)
-			err = prepare_uptodate_page(pages[i],
+		if (!err && i == num_pages - 1)
+			err = prepare_uptodate_page(inode, pages[i],
 						    pos + write_bytes, false);
 		if (err) {
 			page_cache_release(pages[i]);
+			if (err == -EAGAIN) {
+				err = 0;
+				goto again;
+			}
 			faili = i - 1;
 			goto fail;
 		}

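The three file.c hunks above are the "check prepare_uptodate_page() error code earlier" fix: prepare_uptodate_page() drops the page lock around the read, so the page may have been truncated and detached from the mapping by the time it is re-locked; it now takes the inode, detects page->mapping != inode->i_mapping and returns -EAGAIN, and prepare_pages() no longer lets the second call overwrite an error from the first and retries the slot from scratch on -EAGAIN. A small userspace sketch of that retry shape follows, with invented names (prepare_slot, prepare_slots) rather than the btrfs helpers.

/* Userspace sketch of the retry-on-transient-failure shape used by
 * prepare_pages(); all names are illustrative, not btrfs APIs. */
#include <errno.h>
#include <stdio.h>

static int transient_failures;

/* Stand-in for prepare_uptodate_page(): fails transiently a couple of
 * times for slot 0, as if the page had been truncated while unlocked. */
static int prepare_slot(int i)
{
        if (i == 0 && transient_failures < 2) {
                transient_failures++;
                return -EAGAIN;
        }
        return 0;
}

/* Mirrors the fixed control flow: only call the second helper when the
 * first succeeded, and on -EAGAIN redo the slot from scratch. */
static int prepare_slots(int num)
{
        int err = 0;
        int i;

        for (i = 0; i < num; i++) {
again:
                /* acquire/refresh the resource for slot i here */
                if (i == 0)
                        err = prepare_slot(i);
                if (!err && i == num - 1)       /* don't clobber an earlier error */
                        err = prepare_slot(i);
                if (err) {
                        /* release slot i before deciding what to do */
                        if (err == -EAGAIN) {
                                err = 0;
                                goto again;     /* transient: retry this slot */
                        }
                        return err;             /* real error: give up */
                }
        }
        return 0;
}

int main(void)
{
        int ret = prepare_slots(4);

        printf("result=%d after %d transient failures\n",
               ret, transient_failures);
        return 0;
}
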
@@ -891,7 +891,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		spin_unlock(&block_group->lock);
 		ret = 0;
 
-		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
+		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
 			   block_group->key.objectid);
 	}
 

@@ -2972,7 +2972,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 		     u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_free_space *entry;
+	struct btrfs_free_space *entry = NULL;
 	int ret = -ENOSPC;
 	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
 

@@ -2983,8 +2983,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	 * The bitmap that covers offset won't be in the list unless offset
 	 * is just its start offset.
 	 */
-	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
-	if (entry->offset != bitmap_offset) {
+	if (!list_empty(bitmaps))
+		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
+
+	if (!entry || entry->offset != bitmap_offset) {
 		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
 		if (entry && list_empty(&entry->list))
 			list_add(&entry->list, bitmaps);

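The two free-space-cache.c hunks above are the "check for empty bitmap list" fix: list_first_entry() on an empty list does not yield NULL, it yields the list head reinterpreted as an entry, so the entry->offset dereference read garbage whenever the bitmaps list was empty. The fix initializes entry to NULL and only reads the first entry behind a list_empty() check. Below is a toy userspace illustration of why the guard matters; it uses a minimal intrusive list rather than the kernel API, and the offset value is arbitrary.

/* Why list_first_entry() needs a list_empty() guard: with an intrusive,
 * circular list an empty head points at itself, so "first entry" is the
 * head object reinterpreted as an entry -- a valid pointer with garbage
 * fields. Toy list, not the kernel API. */
#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *prev, *next;
};

struct free_space {
        unsigned long long offset;
        struct list_head list;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define first_entry(head, type, member) \
        container_of((head)->next, type, member)

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

int main(void)
{
        struct list_head bitmaps = { &bitmaps, &bitmaps };      /* empty list */
        struct free_space *entry = NULL;
        unsigned long long bitmap_offset = 4096;        /* arbitrary example */

        /* Guarded form from the fix: entry stays NULL when the list is empty. */
        if (!list_empty(&bitmaps))
                entry = first_entry(&bitmaps, struct free_space, list);

        if (!entry || entry->offset != bitmap_offset)
                printf("no usable bitmap entry on the list, search the tree\n");

        /*
         * Without the guard, first_entry(&bitmaps, ...) would have produced
         * container_of(&bitmaps, ...): a non-NULL pointer whose ->offset is
         * whatever memory happens to precede 'bitmaps'.
         */
        return 0;
}
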
@@ -274,7 +274,6 @@ static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
 	cur_trans->num_dirty_bgs = 0;
 	spin_lock_init(&cur_trans->dirty_bgs_lock);
 	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
-	spin_lock_init(&cur_trans->deleted_bgs_lock);
 	spin_lock_init(&cur_trans->dropped_roots_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,

@@ -77,8 +77,8 @@ struct btrfs_transaction {
 	 */
 	struct mutex cache_write_mutex;
 	spinlock_t dirty_bgs_lock;
+	/* Protected by spin lock fs_info->unused_bgs_lock. */
 	struct list_head deleted_bgs;
-	spinlock_t deleted_bgs_lock;
 	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;

@@ -3548,12 +3548,11 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 
 			ret = btrfs_force_chunk_alloc(trans, chunk_root,
 						      BTRFS_BLOCK_GROUP_DATA);
+			btrfs_end_transaction(trans, chunk_root);
 			if (ret < 0) {
 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 				goto error;
 			}
-
-			btrfs_end_transaction(trans, chunk_root);
 			chunk_reserved = 1;
 		}
 

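The volumes.c hunk above is the "fix transaction handle leak in balance" change: when btrfs_force_chunk_alloc() failed, __btrfs_balance() jumped to the error label without ending the transaction it had just started, leaking the handle; calling btrfs_end_transaction() before the ret < 0 check releases it on both paths. Below is a userspace sketch of the same cleanup-ordering shape, with FILE* standing in for the transaction handle and invented helper names.

/* Sketch of the cleanup-ordering fix: release the handle before branching
 * on the error, so both paths release it. FILE* is only a stand-in. */
#include <stdio.h>

/* Stand-in for btrfs_force_chunk_alloc(): pretend the allocation failed. */
static int force_chunk_alloc(FILE *handle)
{
        (void)handle;
        return -1;
}

static int balance_step(void)
{
        FILE *trans = tmpfile();        /* stand-in for starting a transaction */
        int ret;

        if (!trans)
                return -1;

        ret = force_chunk_alloc(trans);
        fclose(trans);                  /* release unconditionally... */
        if (ret < 0)
                return ret;             /* ...so the error path no longer leaks it */

        return 0;
}

int main(void)
{
        printf("balance_step() = %d (handle released on both paths)\n",
               balance_step());
        return 0;
}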