From 9cba40a693e69badb567d6ce0eaa0150f25c3d39 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 28 Jun 2019 23:11:26 +0100 Subject: [PATCH 001/138] Btrfs: factor out extent dropping code from hole punch handler Move the code that is responsible for dropping extents in a range out of btrfs_punch_hole() into a new helper function, btrfs_punch_hole_range(), so that later it can be used by the reflinking (extent cloning and dedup) code to fix a ENOSPC bug. Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/file.c | 308 ++++++++++++++++++++++++++---------------------- 1 file changed, 166 insertions(+), 142 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 58a18ed11546..16dc09736310 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2439,27 +2439,171 @@ static int btrfs_punch_hole_lock_range(struct inode *inode, return 0; } +/* + * The respective range must have been previously locked, as well as the inode. + * The end offset is inclusive (last byte of the range). + */ +static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, + const u64 start, const u64 end, + struct btrfs_trans_handle **trans_out) +{ + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1); + u64 ino_size = round_up(inode->i_size, fs_info->sectorsize); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans = NULL; + struct btrfs_block_rsv *rsv; + unsigned int rsv_count; + u64 cur_offset; + u64 drop_end; + u64 len = end - start; + int ret = 0; + + if (end <= start) + return -EINVAL; + + rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); + if (!rsv) { + ret = -ENOMEM; + goto out; + } + rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1); + rsv->failfast = 1; + + /* + * 1 - update the inode + * 1 - removing the extents in the range + * 1 - adding the hole extent if no_holes isn't set + */ + rsv_count = btrfs_fs_incompat(fs_info, NO_HOLES) ? 2 : 3; + trans = btrfs_start_transaction(root, rsv_count); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + goto out_free; + } + + ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, + min_size, false); + BUG_ON(ret); + trans->block_rsv = rsv; + + cur_offset = start; + while (cur_offset < end) { + ret = __btrfs_drop_extents(trans, root, inode, path, + cur_offset, end + 1, &drop_end, + 1, 0, 0, NULL); + if (ret != -ENOSPC) + break; + + trans->block_rsv = &fs_info->trans_block_rsv; + + if (cur_offset < drop_end && cur_offset < ino_size) { + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); + if (ret) { + /* + * If we failed then we didn't insert our hole + * entries for the area we dropped, so now the + * fs is corrupted, so we must abort the + * transaction. 
+ */ + btrfs_abort_transaction(trans, ret); + break; + } + } + + cur_offset = drop_end; + + ret = btrfs_update_inode(trans, root, inode); + if (ret) + break; + + btrfs_end_transaction(trans); + btrfs_btree_balance_dirty(fs_info); + + trans = btrfs_start_transaction(root, rsv_count); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + break; + } + + ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, + rsv, min_size, false); + BUG_ON(ret); /* shouldn't happen */ + trans->block_rsv = rsv; + + ret = find_first_non_hole(inode, &cur_offset, &len); + if (unlikely(ret < 0)) + break; + if (ret && !len) { + ret = 0; + break; + } + } + + if (ret) + goto out_trans; + + trans->block_rsv = &fs_info->trans_block_rsv; + /* + * If we are using the NO_HOLES feature we might have had already an + * hole that overlaps a part of the region [lockstart, lockend] and + * ends at (or beyond) lockend. Since we have no file extent items to + * represent holes, drop_end can be less than lockend and so we must + * make sure we have an extent map representing the existing hole (the + * call to __btrfs_drop_extents() might have dropped the existing extent + * map representing the existing hole), otherwise the fast fsync path + * will not record the existence of the hole region + * [existing_hole_start, lockend]. + */ + if (drop_end <= end) + drop_end = end + 1; + /* + * Don't insert file hole extent item if it's for a range beyond eof + * (because it's useless) or if it represents a 0 bytes range (when + * cur_offset == drop_end). + */ + if (cur_offset < ino_size && cur_offset < drop_end) { + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); + if (ret) { + /* Same comment as above. */ + btrfs_abort_transaction(trans, ret); + goto out_trans; + } + } + +out_trans: + if (!trans) + goto out_free; + + trans->block_rsv = &fs_info->trans_block_rsv; + if (ret) + btrfs_end_transaction(trans); + else + *trans_out = trans; +out_free: + btrfs_free_block_rsv(fs_info, rsv); +out: + return ret; +} + static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_state *cached_state = NULL; struct btrfs_path *path; - struct btrfs_block_rsv *rsv; - struct btrfs_trans_handle *trans; + struct btrfs_trans_handle *trans = NULL; u64 lockstart; u64 lockend; u64 tail_start; u64 tail_len; u64 orig_start = offset; - u64 cur_offset; - u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1); - u64 drop_end; int ret = 0; - int err = 0; - unsigned int rsv_count; bool same_block; - bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES); u64 ino_size; bool truncated_block = false; bool updated_inode = false; @@ -2566,145 +2710,23 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) goto out; } - rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); - if (!rsv) { - ret = -ENOMEM; - goto out_free; - } - rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1); - rsv->failfast = 1; - - /* - * 1 - update the inode - * 1 - removing the extents in the range - * 1 - adding the hole extent if no_holes isn't set - */ - rsv_count = no_holes ? 
2 : 3; - trans = btrfs_start_transaction(root, rsv_count); - if (IS_ERR(trans)) { - err = PTR_ERR(trans); - goto out_free; - } - - ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, - min_size, false); - BUG_ON(ret); - trans->block_rsv = rsv; - - cur_offset = lockstart; - len = lockend - cur_offset; - while (cur_offset < lockend) { - ret = __btrfs_drop_extents(trans, root, inode, path, - cur_offset, lockend + 1, - &drop_end, 1, 0, 0, NULL); - if (ret != -ENOSPC) - break; - - trans->block_rsv = &fs_info->trans_block_rsv; - - if (cur_offset < drop_end && cur_offset < ino_size) { - ret = fill_holes(trans, BTRFS_I(inode), path, - cur_offset, drop_end); - if (ret) { - /* - * If we failed then we didn't insert our hole - * entries for the area we dropped, so now the - * fs is corrupted, so we must abort the - * transaction. - */ - btrfs_abort_transaction(trans, ret); - err = ret; - break; - } - } - - cur_offset = drop_end; - - ret = btrfs_update_inode(trans, root, inode); - if (ret) { - err = ret; - break; - } - - btrfs_end_transaction(trans); - btrfs_btree_balance_dirty(fs_info); - - trans = btrfs_start_transaction(root, rsv_count); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - trans = NULL; - break; - } - - ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, - rsv, min_size, false); - BUG_ON(ret); /* shouldn't happen */ - trans->block_rsv = rsv; - - ret = find_first_non_hole(inode, &cur_offset, &len); - if (unlikely(ret < 0)) - break; - if (ret && !len) { - ret = 0; - break; - } - } - - if (ret) { - err = ret; - goto out_trans; - } - - trans->block_rsv = &fs_info->trans_block_rsv; - /* - * If we are using the NO_HOLES feature we might have had already an - * hole that overlaps a part of the region [lockstart, lockend] and - * ends at (or beyond) lockend. Since we have no file extent items to - * represent holes, drop_end can be less than lockend and so we must - * make sure we have an extent map representing the existing hole (the - * call to __btrfs_drop_extents() might have dropped the existing extent - * map representing the existing hole), otherwise the fast fsync path - * will not record the existence of the hole region - * [existing_hole_start, lockend]. - */ - if (drop_end <= lockend) - drop_end = lockend + 1; - /* - * Don't insert file hole extent item if it's for a range beyond eof - * (because it's useless) or if it represents a 0 bytes range (when - * cur_offset == drop_end). - */ - if (cur_offset < ino_size && cur_offset < drop_end) { - ret = fill_holes(trans, BTRFS_I(inode), path, - cur_offset, drop_end); - if (ret) { - /* Same comment as above. 
*/ - btrfs_abort_transaction(trans, ret); - err = ret; - goto out_trans; - } - } - -out_trans: - if (!trans) - goto out_free; + ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, &trans); + btrfs_free_path(path); + if (ret) + goto out; + ASSERT(trans != NULL); inode_inc_iversion(inode); inode->i_mtime = inode->i_ctime = current_time(inode); - - trans->block_rsv = &fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); updated_inode = true; btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); -out_free: - btrfs_free_path(path); - btrfs_free_block_rsv(fs_info, rsv); out: unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); out_only_mutex: - if (!updated_inode && truncated_block && !ret && !err) { + if (!updated_inode && truncated_block && !ret) { /* * If we only end up zeroing part of a page, we still need to * update the inode item, so that all the time fields are @@ -2719,16 +2741,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) inode->i_ctime = now; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { - err = PTR_ERR(trans); + ret = PTR_ERR(trans); } else { - err = btrfs_update_inode(trans, root, inode); - ret = btrfs_end_transaction(trans); + int ret2; + + ret = btrfs_update_inode(trans, root, inode); + ret2 = btrfs_end_transaction(trans); + if (!ret) + ret = ret2; } } inode_unlock(inode); - if (ret && !err) - err = ret; - return err; + return ret; } /* Helper structure to record which range is already reserved */ From 690a5dbfc5131572910e6350d65d7b9d55439817 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 5 Jul 2019 11:09:50 +0100 Subject: [PATCH 002/138] Btrfs: fix ENOSPC errors, leading to transaction aborts, when cloning extents When cloning extents (or deduplicating) we create a transaction with a space reservation that considers we will drop or update a single file extent item of the destination inode (that is, we modify a single leaf). That is fine for the vast majority of scenarios, however it might happen that we need to drop many file extent items, and adjust at most two file extent items, in the destination root, which can span multiple leaves. This will lead either to the call to btrfs_drop_extents() failing with ENOSPC or to the subsequent calls to btrfs_insert_empty_item() or btrfs_update_inode() (called through clone_finish_inode_update()) failing with ENOSPC. Such failure results in a transaction abort, leaving the filesystem in a read-only mode. In order to fix this we need to follow the same approach as the hole punching code, where we create a local reservation with 1 unit and keep ending and starting transactions, after balancing the btree inode, when __btrfs_drop_extents() returns ENOSPC. So fix this by making the extent cloning code call the recently added btrfs_punch_hole_range() helper, which does this work for hole punching, and make sure whenever we drop extent items in a transaction, we also add a replacing file extent item, to avoid corruption (a hole) if after ending a transaction and before starting a new one, the old transaction gets committed and a power failure happens before we finish cloning. A test case for fstests follows soon.
Reported-by: David Goodwin Link: https://lore.kernel.org/linux-btrfs/a4a4cf31-9cf4-e52c-1f86-c62d336c9cd1@codepoets.co.uk/ Reported-by: Sam Tygier Link: https://lore.kernel.org/linux-btrfs/82aace9f-a1e3-1f0b-055f-3ea75f7a41a0@tygier.co.uk/ Fixes: b6f3409b2197e8f ("Btrfs: reserve sufficient space for ioctl clone") Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 14 ++++ fs/btrfs/file.c | 146 +++++++++++++++++++++++++++++++---- fs/btrfs/ioctl.c | 195 +++++++++++------------------------------------ 3 files changed, 188 insertions(+), 167 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 94660063a162..670973025048 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1279,6 +1279,16 @@ struct btrfs_root { #endif }; +struct btrfs_clone_extent_info { + u64 disk_offset; + u64 disk_len; + u64 data_offset; + u64 data_len; + u64 file_offset; + char *extent_buf; + u32 item_size; +}; + struct btrfs_file_private { void *filldir_buf; }; @@ -3233,6 +3243,10 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, int drop_cache); +int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, + const u64 start, const u64 end, + struct btrfs_clone_extent_info *clone_info, + struct btrfs_trans_handle **trans_out); int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 16dc09736310..474ff1cac640 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2439,13 +2439,76 @@ static int btrfs_punch_hole_lock_range(struct inode *inode, return 0; } +static int btrfs_insert_clone_extent(struct btrfs_trans_handle *trans, + struct inode *inode, + struct btrfs_path *path, + struct btrfs_clone_extent_info *clone_info, + const u64 clone_len) +{ + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_file_extent_item *extent; + struct extent_buffer *leaf; + struct btrfs_key key; + int slot; + struct btrfs_ref ref = { 0 }; + u64 ref_offset; + int ret; + + if (clone_len == 0) + return 0; + + if (clone_info->disk_offset == 0 && + btrfs_fs_incompat(fs_info, NO_HOLES)) + return 0; + + key.objectid = btrfs_ino(BTRFS_I(inode)); + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = clone_info->file_offset; + ret = btrfs_insert_empty_item(trans, root, path, &key, + clone_info->item_size); + if (ret) + return ret; + leaf = path->nodes[0]; + slot = path->slots[0]; + write_extent_buffer(leaf, clone_info->extent_buf, + btrfs_item_ptr_offset(leaf, slot), + clone_info->item_size); + extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + btrfs_set_file_extent_offset(leaf, extent, clone_info->data_offset); + btrfs_set_file_extent_num_bytes(leaf, extent, clone_len); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(path); + + /* If it's a hole, nothing more needs to be done. 
*/ + if (clone_info->disk_offset == 0) + return 0; + + inode_add_bytes(inode, clone_len); + btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, + clone_info->disk_offset, + clone_info->disk_len, 0); + ref_offset = clone_info->file_offset - clone_info->data_offset; + btrfs_init_data_ref(&ref, root->root_key.objectid, + btrfs_ino(BTRFS_I(inode)), ref_offset); + ret = btrfs_inc_extent_ref(trans, &ref); + + return ret; +} + /* * The respective range must have been previously locked, as well as the inode. * The end offset is inclusive (last byte of the range). + * @clone_info is NULL for fallocate's hole punching and non-NULL for extent + * cloning. + * When cloning, we don't want to end up in a state where we dropped extents + * without inserting a new one, so we must abort the transaction to avoid a + * corruption. */ -static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, - const u64 start, const u64 end, - struct btrfs_trans_handle **trans_out) +int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, + const u64 start, const u64 end, + struct btrfs_clone_extent_info *clone_info, + struct btrfs_trans_handle **trans_out) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1); @@ -2473,9 +2536,14 @@ static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, /* * 1 - update the inode * 1 - removing the extents in the range - * 1 - adding the hole extent if no_holes isn't set + * 1 - adding the hole extent if no_holes isn't set or if we are cloning + * an extent */ - rsv_count = btrfs_fs_incompat(fs_info, NO_HOLES) ? 2 : 3; + if (!btrfs_fs_incompat(fs_info, NO_HOLES) || clone_info) + rsv_count = 3; + else + rsv_count = 2; + trans = btrfs_start_transaction(root, rsv_count); if (IS_ERR(trans)) { ret = PTR_ERR(trans); @@ -2493,12 +2561,23 @@ static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, ret = __btrfs_drop_extents(trans, root, inode, path, cur_offset, end + 1, &drop_end, 1, 0, 0, NULL); - if (ret != -ENOSPC) + if (ret != -ENOSPC) { + /* + * When cloning we want to avoid transaction aborts when + * nothing was done and we are attempting to clone parts + * of inline extents, in such cases -EOPNOTSUPP is + * returned by __btrfs_drop_extents() without having + * changed anything in the file. 
+ */ + if (clone_info && ret && ret != -EOPNOTSUPP) + btrfs_abort_transaction(trans, ret); break; + } trans->block_rsv = &fs_info->trans_block_rsv; - if (cur_offset < drop_end && cur_offset < ino_size) { + if (!clone_info && cur_offset < drop_end && + cur_offset < ino_size) { ret = fill_holes(trans, BTRFS_I(inode), path, cur_offset, drop_end); if (ret) { @@ -2513,6 +2592,20 @@ static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, } } + if (clone_info) { + u64 clone_len = drop_end - cur_offset; + + ret = btrfs_insert_clone_extent(trans, inode, path, + clone_info, clone_len); + if (ret) { + btrfs_abort_transaction(trans, ret); + break; + } + clone_info->data_len -= clone_len; + clone_info->data_offset += clone_len; + clone_info->file_offset += clone_len; + } + cur_offset = drop_end; ret = btrfs_update_inode(trans, root, inode); @@ -2534,15 +2627,29 @@ static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, BUG_ON(ret); /* shouldn't happen */ trans->block_rsv = rsv; - ret = find_first_non_hole(inode, &cur_offset, &len); - if (unlikely(ret < 0)) - break; - if (ret && !len) { - ret = 0; - break; + if (!clone_info) { + ret = find_first_non_hole(inode, &cur_offset, &len); + if (unlikely(ret < 0)) + break; + if (ret && !len) { + ret = 0; + break; + } } } + /* + * If we were cloning, force the next fsync to be a full one since we + * we replaced (or just dropped in the case of cloning holes when + * NO_HOLES is enabled) extents and extent maps. + * This is for the sake of simplicity, and cloning into files larger + * than 16Mb would force the full fsync any way (when + * try_release_extent_mapping() is invoked during page cache truncation. + */ + if (clone_info) + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags); + if (ret) goto out_trans; @@ -2565,7 +2672,7 @@ static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, * (because it's useless) or if it represents a 0 bytes range (when * cur_offset == drop_end). 
*/ - if (cur_offset < ino_size && cur_offset < drop_end) { + if (!clone_info && cur_offset < ino_size && cur_offset < drop_end) { ret = fill_holes(trans, BTRFS_I(inode), path, cur_offset, drop_end); if (ret) { @@ -2574,6 +2681,14 @@ static int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, goto out_trans; } } + if (clone_info) { + ret = btrfs_insert_clone_extent(trans, inode, path, clone_info, + clone_info->data_len); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out_trans; + } + } out_trans: if (!trans) @@ -2710,7 +2825,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) goto out; } - ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, &trans); + ret = btrfs_punch_hole_range(inode, path, lockstart, lockend, NULL, + &trans); btrfs_free_path(path); if (ret) goto out; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 818f7ec8bb0e..0a0c54e99b22 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3324,61 +3324,6 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, return ret; } -static void clone_update_extent_map(struct btrfs_inode *inode, - const struct btrfs_trans_handle *trans, - const struct btrfs_path *path, - const u64 hole_offset, - const u64 hole_len) -{ - struct extent_map_tree *em_tree = &inode->extent_tree; - struct extent_map *em; - int ret; - - em = alloc_extent_map(); - if (!em) { - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); - return; - } - - if (path) { - struct btrfs_file_extent_item *fi; - - fi = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_file_extent_item); - btrfs_extent_item_to_extent_map(inode, path, fi, false, em); - em->generation = -1; - if (btrfs_file_extent_type(path->nodes[0], fi) == - BTRFS_FILE_EXTENT_INLINE) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &inode->runtime_flags); - } else { - em->start = hole_offset; - em->len = hole_len; - em->ram_bytes = em->len; - em->orig_start = hole_offset; - em->block_start = EXTENT_MAP_HOLE; - em->block_len = 0; - em->orig_block_len = 0; - em->compress_type = BTRFS_COMPRESS_NONE; - em->generation = trans->transid; - } - - while (1) { - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em, 1); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(inode, em->start, - em->start + em->len - 1, 0); - } - - if (ret) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); -} - /* * Make sure we do not end up inserting an inline extent into a file that has * already other (non-inline) extents. 
If a file has an inline extent it can @@ -3519,6 +3464,7 @@ static int clone_copy_inline_extent(struct inode *dst, path->slots[0]), size); inode_add_bytes(dst, datal); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags); return 0; } @@ -3678,19 +3624,10 @@ static int btrfs_clone(struct inode *src, struct inode *inode, else drop_start = new_key.offset; - /* - * 1 - adjusting old extent (we may have to split it) - * 1 - add new extent - * 1 - inode update - */ - trans = btrfs_start_transaction(root, 3); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out; - } - if (type == BTRFS_FILE_EXTENT_REG || type == BTRFS_FILE_EXTENT_PREALLOC) { + struct btrfs_clone_extent_info clone_info; + /* * a | --- range to clone ---| b * | ------------- extent ------------- | @@ -3706,63 +3643,19 @@ static int btrfs_clone(struct inode *src, struct inode *inode, datal -= off - key.offset; } - ret = btrfs_drop_extents(trans, root, inode, - drop_start, - new_key.offset + datal, - 1); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, - ret); - btrfs_end_transaction(trans); + clone_info.disk_offset = disko; + clone_info.disk_len = diskl; + clone_info.data_offset = datao; + clone_info.data_len = datal; + clone_info.file_offset = new_key.offset; + clone_info.extent_buf = buf; + clone_info.item_size = size; + ret = btrfs_punch_hole_range(inode, path, + drop_start, + new_key.offset + datal - 1, + &clone_info, &trans); + if (ret) goto out; - } - - ret = btrfs_insert_empty_item(trans, root, path, - &new_key, size); - if (ret) { - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - - leaf = path->nodes[0]; - slot = path->slots[0]; - write_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); - - /* disko == 0 means it's a hole */ - if (!disko) - datao = 0; - - btrfs_set_file_extent_offset(leaf, extent, - datao); - btrfs_set_file_extent_num_bytes(leaf, extent, - datal); - - if (disko) { - struct btrfs_ref ref = { 0 }; - inode_add_bytes(inode, datal); - btrfs_init_generic_ref(&ref, - BTRFS_ADD_DELAYED_REF, disko, - diskl, 0); - btrfs_init_data_ref(&ref, - root->root_key.objectid, - btrfs_ino(BTRFS_I(inode)), - new_key.offset - datao); - ret = btrfs_inc_extent_ref(trans, &ref); - if (ret) { - btrfs_abort_transaction(trans, - ret); - btrfs_end_transaction(trans); - goto out; - - } - } } else if (type == BTRFS_FILE_EXTENT_INLINE) { u64 skip = 0; u64 trim = 0; @@ -3777,12 +3670,27 @@ static int btrfs_clone(struct inode *src, struct inode *inode, if (comp && (skip || trim)) { ret = -EINVAL; - btrfs_end_transaction(trans); goto out; } size -= skip + trim; datal -= skip + trim; + /* + * If our extent is inline, we know we will drop + * or adjust at most 1 extent item in the + * destination root. + * + * 1 - adjusting old extent (we may have to + * split it) + * 1 - add new extent + * 1 - inode update + */ + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out; + } + ret = clone_copy_inline_extent(inode, trans, path, &new_key, @@ -3796,20 +3704,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode, btrfs_end_transaction(trans); goto out; } - leaf = path->nodes[0]; - slot = path->slots[0]; } - /* If we have an implicit hole (NO_HOLES feature). 
*/ - if (drop_start < new_key.offset) - clone_update_extent_map(BTRFS_I(inode), trans, - NULL, drop_start, - new_key.offset - drop_start); - - clone_update_extent_map(BTRFS_I(inode), trans, - path, 0, 0); - - btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); last_dest_end = ALIGN(new_key.offset + datal, @@ -3834,32 +3730,27 @@ static int btrfs_clone(struct inode *src, struct inode *inode, ret = 0; if (last_dest_end < destoff + len) { + struct btrfs_clone_extent_info clone_info = { 0 }; /* * We have an implicit hole (NO_HOLES feature is enabled) that * fully or partially overlaps our cloning range at its end. */ btrfs_release_path(path); + path->leave_spinning = 0; /* - * 1 - remove extent(s) - * 1 - inode update + * We are dealing with a hole and our clone_info already has a + * disk_offset of 0, we only need to fill the data length and + * file offset. */ - trans = btrfs_start_transaction(root, 2); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); + clone_info.data_len = destoff + len - last_dest_end; + clone_info.file_offset = last_dest_end; + ret = btrfs_punch_hole_range(inode, path, + last_dest_end, destoff + len - 1, + &clone_info, &trans); + if (ret) goto out; - } - ret = btrfs_drop_extents(trans, root, inode, - last_dest_end, destoff + len, 1); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, ret); - btrfs_end_transaction(trans); - goto out; - } - clone_update_extent_map(BTRFS_I(inode), trans, NULL, - last_dest_end, - destoff + len - last_dest_end); + ret = clone_finish_inode_update(trans, inode, destoff + len, destoff, olen, no_time_update); } From 99fccf33c203f63ea10e611c505a26686b0b8738 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 2 Jul 2019 22:15:21 +0800 Subject: [PATCH 003/138] btrfs: remove set but not used variable 'offset' Fixes gcc '-Wunused-but-set-variable' warning: fs/btrfs/volumes.c: In function __btrfs_map_block: fs/btrfs/volumes.c:6023:6: warning: variable offset set but not used [-Wunused-but-set-variable] It is not used any more since commit 343abd1c0ca9 ("btrfs: Use btrfs_get_io_geometry appropriately"). Reported-by: Hulk Robot Signed-off-by: YueHaibing Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a447d3ec48d5..662a9d14f151 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6011,7 +6011,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, { struct extent_map *em; struct map_lookup *map; - u64 offset; u64 stripe_offset; u64 stripe_nr; u64 stripe_len; @@ -6046,7 +6045,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, map = em->map_lookup; *length = geom.len; - offset = geom.offset; stripe_len = geom.stripe_len; stripe_nr = geom.stripe_nr; stripe_offset = geom.stripe_offset; From 8ddc319706e5ca201a128e3f2477938d7f174ca8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 8 Jul 2019 14:40:09 +0200 Subject: [PATCH 004/138] btrfs: reduce stack usage for btrfsic_process_written_block btrfsic_process_written_block() calls btrfsic_process_metablock(), which has a fairly large stack usage due to the btrfsic_stack_frame variable. It also calls btrfsic_test_for_metadata(), which now needs several hundred bytes for its SHASH_DESC_ON_STACK().
In some configurations, we end up with both functions on the same stack, and gcc warns about the excessive stack usage that might cause the available stack space to run out: fs/btrfs/check-integrity.c:1743:13: error: stack frame size of 1152 bytes in function 'btrfsic_process_written_block' [-Werror,-Wframe-larger-than=] Marking both child functions as noinline_for_stack helps because this guarantees that the large variables are not on the same stack frame. Fixes: d5178578bcd4 ("btrfs: directly call into crypto framework for checksumming") Reviewed-by: Johannes Thumshirn Signed-off-by: Arnd Bergmann Signed-off-by: David Sterba --- fs/btrfs/check-integrity.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index 81a9731959a9..0b52ab4cb964 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -940,7 +940,7 @@ static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf) kfree(sf); } -static int btrfsic_process_metablock( +static noinline_for_stack int btrfsic_process_metablock( struct btrfsic_state *state, struct btrfsic_block *const first_block, struct btrfsic_block_data_ctx *const first_block_ctx, @@ -1706,8 +1706,9 @@ static void btrfsic_dump_database(struct btrfsic_state *state) * Test whether the disk block contains a tree block (leaf or node) * (note that this test fails for the super block) */ -static int btrfsic_test_for_metadata(struct btrfsic_state *state, - char **datav, unsigned int num_pages) +static noinline_for_stack int btrfsic_test_for_metadata( + struct btrfsic_state *state, + char **datav, unsigned int num_pages) { struct btrfs_fs_info *fs_info = state->fs_info; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); From 5044ed4f394cb371fb85db3e3ec0296487b7d324 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 25 Jul 2019 11:27:28 +0300 Subject: [PATCH 005/138] btrfs: Remove unused locking functions Those were split out of btrfs_clear_lock_blocking_rw by aa12c02778a9 ("btrfs: split btrfs_clear_lock_blocking_rw to read and write helpers") however at that time this function was unused due to commit 523983401644 ("Btrfs: kill btrfs_clear_path_blocking"). Put the final nail in the coffin of those 2 functions. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/locking.c | 36 ------------------------------------ fs/btrfs/locking.h | 2 -- include/trace/events/btrfs.h | 2 -- 3 files changed, 40 deletions(-) diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 393eceda57c8..e4309bcf0b5f 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -119,42 +119,6 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb) } } -void btrfs_clear_lock_blocking_read(struct extent_buffer *eb) -{ - trace_btrfs_clear_lock_blocking_read(eb); - /* - * No lock is required. The lock owner may change if we have a read - * lock, but it won't change to or away from us. If we have the write - * lock, we are the owner and it'll never change. - */ - if (eb->lock_nested && current->pid == eb->lock_owner) - return; - BUG_ON(atomic_read(&eb->blocking_readers) == 0); - read_lock(&eb->lock); - btrfs_assert_spinning_readers_get(eb); - /* atomic_dec_and_test implies a barrier */ - if (atomic_dec_and_test(&eb->blocking_readers)) - cond_wake_up_nomb(&eb->read_lock_wq); -} - -void btrfs_clear_lock_blocking_write(struct extent_buffer *eb) -{ - trace_btrfs_clear_lock_blocking_write(eb); - /* - * no lock is required. 
The lock owner may change if - * we have a read lock, but it won't change to or away - * from us. If we have the write lock, we are the owner - * and it'll never change. - */ - if (eb->lock_nested && current->pid == eb->lock_owner) - return; - write_lock(&eb->lock); - BUG_ON(eb->blocking_writers != 1); - btrfs_assert_spinning_writers_get(eb); - if (--eb->blocking_writers == 0) - cond_wake_up(&eb->write_lock_wq); -} - /* * take a spinning read lock. This will wait for any blocking * writers diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 595014f64830..b775a4207ed9 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -19,8 +19,6 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb); void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb); void btrfs_set_lock_blocking_read(struct extent_buffer *eb); void btrfs_set_lock_blocking_write(struct extent_buffer *eb); -void btrfs_clear_lock_blocking_read(struct extent_buffer *eb); -void btrfs_clear_lock_blocking_write(struct extent_buffer *eb); void btrfs_assert_tree_locked(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 2f6a669408bb..5cb95646b94e 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -2086,8 +2086,6 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock); DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking); DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read); DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write); -DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_read); -DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_write); DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock); DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock); DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic); From 40cf931fa81bedea08823dda9e6e73630db41b70 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Wed, 17 Jul 2019 12:39:20 -0500 Subject: [PATCH 006/138] btrfs: use common vfs LABEL ioctl definitions I lifted the btrfs label get/set ioctls to the vfs some time ago, but never followed up to use those common definitions directly in btrfs. This patch does that. 
Signed-off-by: Eric Sandeen Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 8 ++++---- include/uapi/linux/btrfs.h | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0a0c54e99b22..9eaf78d7b8eb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -5453,6 +5453,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_setflags(file, argp); case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); + case FS_IOC_GETFSLABEL: + return btrfs_ioctl_get_fslabel(file, argp); + case FS_IOC_SETFSLABEL: + return btrfs_ioctl_set_fslabel(file, argp); case FITRIM: return btrfs_ioctl_fitrim(file, argp); case BTRFS_IOC_SNAP_CREATE: @@ -5564,10 +5568,6 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_quota_rescan_wait(file, argp); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); - case BTRFS_IOC_GET_FSLABEL: - return btrfs_ioctl_get_fslabel(file, argp); - case BTRFS_IOC_SET_FSLABEL: - return btrfs_ioctl_set_fslabel(file, argp); case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index c195896d478f..7885d79f7515 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -917,10 +917,8 @@ enum btrfs_err_code { #define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \ struct btrfs_ioctl_quota_rescan_args) #define BTRFS_IOC_QUOTA_RESCAN_WAIT _IO(BTRFS_IOCTL_MAGIC, 46) -#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \ - char[BTRFS_LABEL_SIZE]) -#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \ - char[BTRFS_LABEL_SIZE]) +#define BTRFS_IOC_GET_FSLABEL FS_IOC_GETFSLABEL +#define BTRFS_IOC_SET_FSLABEL FS_IOC_SETFSLABEL #define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \ struct btrfs_ioctl_get_dev_stats) #define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \ From ac3e99334d640b6dc8d2d8cbc57e080ba308b4c0 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 17 Jul 2019 14:41:44 +0300 Subject: [PATCH 007/138] btrfs: Return number of compressed extents directly in compress_file_range compress_file_range returns a void, yet uses a function parameter as a return value. Make that more idiomatic by simply returning the number of compressed extents directly. Also track such extents in more aptly named variables. No functional changes. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ee582a36653d..db814f555b26 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -462,8 +462,7 @@ static inline void inode_should_defrag(struct btrfs_inode *inode, * are written in the same order that the flusher thread sent them * down. 
*/ -static noinline void compress_file_range(struct async_chunk *async_chunk, - int *num_added) +static noinline int compress_file_range(struct async_chunk *async_chunk) { struct inode *inode = async_chunk->inode; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); @@ -479,6 +478,7 @@ static noinline void compress_file_range(struct async_chunk *async_chunk, int i; int will_compress; int compress_type = fs_info->compress_type; + int compressed_extents = 0; int redirty = 0; inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, @@ -641,7 +641,7 @@ static noinline void compress_file_range(struct async_chunk *async_chunk, */ total_in = ALIGN(total_in, PAGE_SIZE); if (total_compressed + blocksize <= total_in) { - *num_added += 1; + compressed_extents++; /* * The async work queues will take care of doing actual @@ -658,7 +658,7 @@ static noinline void compress_file_range(struct async_chunk *async_chunk, cond_resched(); goto again; } - return; + return compressed_extents; } } if (pages) { @@ -697,9 +697,9 @@ static noinline void compress_file_range(struct async_chunk *async_chunk, extent_range_redirty_for_io(inode, start, end); add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, BTRFS_COMPRESS_NONE); - *num_added += 1; + compressed_extents++; - return; + return compressed_extents; free_pages_out: for (i = 0; i < nr_pages; i++) { @@ -707,6 +707,8 @@ static noinline void compress_file_range(struct async_chunk *async_chunk, put_page(pages[i]); } kfree(pages); + + return 0; } static void free_async_extent_pages(struct async_extent *async_extent) @@ -1144,12 +1146,12 @@ static noinline int cow_file_range(struct inode *inode, static noinline void async_cow_start(struct btrfs_work *work) { struct async_chunk *async_chunk; - int num_added = 0; + int compressed_extents; async_chunk = container_of(work, struct async_chunk, work); - compress_file_range(async_chunk, &num_added); - if (num_added == 0) { + compressed_extents = compress_file_range(async_chunk); + if (compressed_extents == 0) { btrfs_add_delayed_iput(async_chunk->inode); async_chunk->inode = NULL; } From cecc8d9038d164eda61fbcd72520975a554ea63e Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 17 Jul 2019 14:41:45 +0300 Subject: [PATCH 008/138] btrfs: Move free_pages_out label in inline extent handling branch in compress_file_range This label is only executed if compress_file_range fails to create an inline extent. So move its code in the semantically related inline extent handling branch. No functional changes. 
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index db814f555b26..385127ab0841 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -622,7 +622,14 @@ static noinline int compress_file_range(struct async_chunk *async_chunk) PAGE_SET_WRITEBACK | page_error_op | PAGE_END_WRITEBACK); - goto free_pages_out; + + for (i = 0; i < nr_pages; i++) { + WARN_ON(pages[i]->mapping); + put_page(pages[i]); + } + kfree(pages); + + return 0; } } @@ -700,15 +707,6 @@ static noinline int compress_file_range(struct async_chunk *async_chunk) compressed_extents++; return compressed_extents; - -free_pages_out: - for (i = 0; i < nr_pages; i++) { - WARN_ON(pages[i]->mapping); - put_page(pages[i]); - } - kfree(pages); - - return 0; } static void free_async_extent_pages(struct async_extent *async_extent) From 74e9194afb2c5c6b45ada5653b2609499c372d77 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 17 Jul 2019 16:18:16 +0300 Subject: [PATCH 009/138] btrfs: Remove delalloc_end argument from extent_clear_unlock_delalloc It was added in ba8b04c1d4ad ("btrfs: extend btrfs_set_extent_delalloc and its friends to support in-band dedupe and subpage size patchset") as a preparatory patch for the in-band and subpage block size patchsets. However, neither of those is likely to be merged anytime soon and the code has diverged significantly from the last public post of either of those patchsets. It's unlikely either of the patchsets will use those preparatory steps, so just remove the variables. Since cow_file_range also took delalloc_end to pass it to extent_clear_unlock_delalloc, remove the parameter from that function as well.
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 6 +++--- fs/btrfs/extent_io.h | 6 +++--- fs/btrfs/inode.c | 48 ++++++++++++++++++-------------------------- 3 files changed, 25 insertions(+), 35 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1ff438fd5bc2..bac59d721b54 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1938,9 +1938,9 @@ static int __process_pages_contig(struct address_space *mapping, } void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, - u64 delalloc_end, struct page *locked_page, - unsigned clear_bits, - unsigned long page_ops) + struct page *locked_page, + unsigned clear_bits, + unsigned long page_ops) { clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0, NULL); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 401423b16976..cf3424d58fec 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -494,9 +494,9 @@ int map_private_extent_buffer(const struct extent_buffer *eb, void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, - u64 delalloc_end, struct page *locked_page, - unsigned bits_to_clear, - unsigned long page_ops); + struct page *locked_page, + unsigned bits_to_clear, + unsigned long page_ops); struct bio *btrfs_bio_alloc(u64 first_byte); struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs); struct bio *btrfs_bio_clone(struct bio *bio); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 385127ab0841..d038fc6b3e2f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -80,9 +80,9 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback); static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); static noinline int cow_file_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, u64 delalloc_end, - int *page_started, unsigned long *nr_written, - int unlock, struct btrfs_dedupe_hash *hash); + u64 start, u64 end, int *page_started, + unsigned long *nr_written, int unlock, + struct btrfs_dedupe_hash *hash); static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, u64 orig_start, u64 block_start, u64 block_len, u64 orig_block_len, @@ -615,8 +615,8 @@ static noinline int compress_file_range(struct async_chunk *async_chunk) * our outstanding extent for clearing delalloc for this * range. */ - extent_clear_unlock_delalloc(inode, start, end, end, - NULL, clear_flags, + extent_clear_unlock_delalloc(inode, start, end, NULL, + clear_flags, PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK | @@ -762,8 +762,6 @@ static noinline void submit_compressed_extents(struct async_chunk *async_chunk) async_extent->start, async_extent->start + async_extent->ram_size - 1, - async_extent->start + - async_extent->ram_size - 1, &page_started, &nr_written, 0, NULL); @@ -853,8 +851,6 @@ static noinline void submit_compressed_extents(struct async_chunk *async_chunk) * clear dirty, set writeback and unlock the pages. 
*/ extent_clear_unlock_delalloc(inode, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, async_extent->start + async_extent->ram_size - 1, NULL, EXTENT_LOCKED | EXTENT_DELALLOC, @@ -875,7 +871,7 @@ static noinline void submit_compressed_extents(struct async_chunk *async_chunk) btrfs_writepage_endio_finish_ordered(p, start, end, 0); p->mapping = NULL; - extent_clear_unlock_delalloc(inode, start, end, end, + extent_clear_unlock_delalloc(inode, start, end, NULL, 0, PAGE_END_WRITEBACK | PAGE_SET_ERROR); @@ -891,8 +887,6 @@ static noinline void submit_compressed_extents(struct async_chunk *async_chunk) btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); out_free: extent_clear_unlock_delalloc(inode, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, async_extent->start + async_extent->ram_size - 1, NULL, EXTENT_LOCKED | EXTENT_DELALLOC | @@ -953,9 +947,9 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, */ static noinline int cow_file_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, u64 delalloc_end, - int *page_started, unsigned long *nr_written, - int unlock, struct btrfs_dedupe_hash *hash) + u64 start, u64 end, int *page_started, + unsigned long *nr_written, int unlock, + struct btrfs_dedupe_hash *hash) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; @@ -994,8 +988,7 @@ static noinline int cow_file_range(struct inode *inode, * our outstanding extent for clearing delalloc for this * range. */ - extent_clear_unlock_delalloc(inode, start, end, - delalloc_end, NULL, + extent_clear_unlock_delalloc(inode, start, end, NULL, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | @@ -1078,7 +1071,7 @@ static noinline int cow_file_range(struct inode *inode, extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, - delalloc_end, locked_page, + locked_page, EXTENT_LOCKED | EXTENT_DELALLOC, page_ops); if (num_bytes < cur_alloc_size) @@ -1122,7 +1115,6 @@ static noinline int cow_file_range(struct inode *inode, */ if (extent_reserved) { extent_clear_unlock_delalloc(inode, start, - start + cur_alloc_size, start + cur_alloc_size, locked_page, clear_bits, @@ -1131,8 +1123,7 @@ static noinline int cow_file_range(struct inode *inode, if (start >= end) goto out; } - extent_clear_unlock_delalloc(inode, start, end, delalloc_end, - locked_page, + extent_clear_unlock_delalloc(inode, start, end, locked_page, clear_bits | EXTENT_CLEAR_DATA_RESV, page_ops); goto out; @@ -1235,7 +1226,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK | PAGE_SET_ERROR; - extent_clear_unlock_delalloc(inode, start, end, 0, locked_page, + extent_clear_unlock_delalloc(inode, start, end, locked_page, clear_bits, page_ops); return -ENOMEM; } @@ -1338,8 +1329,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, path = btrfs_alloc_path(); if (!path) { - extent_clear_unlock_delalloc(inode, start, end, end, - locked_page, + extent_clear_unlock_delalloc(inode, start, end, locked_page, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, PAGE_UNLOCK | @@ -1516,7 +1506,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, found_key.offset - 1, - end, page_started, nr_written, 1, + page_started, nr_written, 1, NULL); if (ret) { if 
(nocow) @@ -1570,7 +1560,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, num_bytes); extent_clear_unlock_delalloc(inode, cur_offset, - cur_offset + num_bytes - 1, end, + cur_offset + num_bytes - 1, locked_page, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_CLEAR_DATA_RESV, @@ -1595,7 +1585,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, if (cow_start != (u64)-1) { cur_offset = end; - ret = cow_file_range(inode, locked_page, cow_start, end, end, + ret = cow_file_range(inode, locked_page, cow_start, end, page_started, nr_written, 1, NULL); if (ret) goto error; @@ -1603,7 +1593,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, error: if (ret && cur_offset < end) - extent_clear_unlock_delalloc(inode, cur_offset, end, end, + extent_clear_unlock_delalloc(inode, cur_offset, end, locked_page, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | @@ -1654,7 +1644,7 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, page_started, 0, nr_written); } else if (!inode_can_compress(inode) || !inode_need_compress(inode, start, end)) { - ret = cow_file_range(inode, locked_page, start, end, end, + ret = cow_file_range(inode, locked_page, start, end, page_started, nr_written, 1, NULL); } else { set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, From 330a582790452a159686c5dab8f4286babd9c00e Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 17 Jul 2019 16:18:17 +0300 Subject: [PATCH 010/138] btrfs: Remove leftover of in-band dedupe It's unlikely in-band dedupe is going to land so just remove any leftovers - dedupe.h header as well as the 'dedupe' parameter to btrfs_set_extent_delalloc. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 2 +- fs/btrfs/dedupe.h | 12 ------------ fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 25 ++++++++++--------------- fs/btrfs/relocation.c | 2 +- fs/btrfs/tests/inode-tests.c | 12 ++++++------ 6 files changed, 19 insertions(+), 36 deletions(-) delete mode 100644 fs/btrfs/dedupe.h diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 670973025048..d2807b99aa97 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3147,7 +3147,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root); int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr); int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, unsigned int extra_bits, - struct extent_state **cached_state, int dedupe); + struct extent_state **cached_state); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, struct btrfs_root *new_root, struct btrfs_root *parent_root, diff --git a/fs/btrfs/dedupe.h b/fs/btrfs/dedupe.h deleted file mode 100644 index 90281a7a35a8..000000000000 --- a/fs/btrfs/dedupe.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2016 Fujitsu. All rights reserved. 
- */ - -#ifndef BTRFS_DEDUPE_H -#define BTRFS_DEDUPE_H - -/* later in-band dedupe will expand this struct */ -struct btrfs_dedupe_hash; - -#endif diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 474ff1cac640..b31991f0f440 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -559,7 +559,7 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, } err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, - extra_bits, cached, 0); + extra_bits, cached); if (err) return err; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d038fc6b3e2f..c3feb5310e17 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -46,7 +46,6 @@ #include "backref.h" #include "props.h" #include "qgroup.h" -#include "dedupe.h" #include "delalloc-space.h" struct btrfs_iget_args { @@ -81,8 +80,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); static noinline int cow_file_range(struct inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, - unsigned long *nr_written, int unlock, - struct btrfs_dedupe_hash *hash); + unsigned long *nr_written, int unlock); static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, u64 orig_start, u64 block_start, u64 block_len, u64 orig_block_len, @@ -762,8 +760,7 @@ static noinline void submit_compressed_extents(struct async_chunk *async_chunk) async_extent->start, async_extent->start + async_extent->ram_size - 1, - &page_started, &nr_written, 0, - NULL); + &page_started, &nr_written, 0); /* JDM XXX */ @@ -948,8 +945,7 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, static noinline int cow_file_range(struct inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, - unsigned long *nr_written, int unlock, - struct btrfs_dedupe_hash *hash) + unsigned long *nr_written, int unlock) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; @@ -1506,8 +1502,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, found_key.offset - 1, - page_started, nr_written, 1, - NULL); + page_started, nr_written, 1); if (ret) { if (nocow) btrfs_dec_nocow_writers(fs_info, @@ -1586,7 +1581,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, if (cow_start != (u64)-1) { cur_offset = end; ret = cow_file_range(inode, locked_page, cow_start, end, - page_started, nr_written, 1, NULL); + page_started, nr_written, 1); if (ret) goto error; } @@ -1645,7 +1640,7 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, } else if (!inode_can_compress(inode) || !inode_need_compress(inode, start, end)) { ret = cow_file_range(inode, locked_page, start, end, - page_started, nr_written, 1, NULL); + page_started, nr_written, 1); } else { set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &BTRFS_I(inode)->runtime_flags); @@ -2080,7 +2075,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, unsigned int extra_bits, - struct extent_state **cached_state, int dedupe) + struct extent_state **cached_state) { WARN_ON(PAGE_ALIGNED(end)); return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, @@ -2146,7 +2141,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) } ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, - &cached_state, 0); + &cached_state); if (ret) { mapping_set_error(page->mapping, 
ret); end_extent_writepage(page, ret, page_start, page_end); @@ -4941,7 +4936,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, 0, 0, &cached_state); ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, - &cached_state, 0); + &cached_state); if (ret) { unlock_extent_cached(io_tree, block_start, block_end, &cached_state); @@ -8992,7 +8987,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) 0, 0, &cached_state); ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0, - &cached_state, 0); + &cached_state); if (ret2) { unlock_extent_cached(io_tree, page_start, page_end, &cached_state); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 7f219851fa23..7ec632d4d960 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3311,7 +3311,7 @@ static int relocate_file_extent_cluster(struct inode *inode, } ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, - NULL, 0); + NULL); if (ret) { unlock_page(page); put_page(page); diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index bc6dbd1b42fd..b363fb990cec 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -957,7 +957,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) /* [BTRFS_MAX_EXTENT_SIZE] */ ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0, - NULL, 0); + NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; @@ -972,7 +972,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, BTRFS_MAX_EXTENT_SIZE + sectorsize - 1, - 0, NULL, 0); + 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; @@ -1005,7 +1005,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, - 0, NULL, 0); + 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; @@ -1023,7 +1023,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize, (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1, - 0, NULL, 0); + 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; @@ -1040,7 +1040,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) */ ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + sectorsize, - BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0); + BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; @@ -1075,7 +1075,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) */ ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + sectorsize, - BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0); + BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL); if (ret) { test_err("btrfs_set_extent_delalloc returned %d", ret); goto out; From 982f1f5d161735efb85f85cd9c5fb4d61ccfc0aa Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 27 Jul 2019 16:51:13 +0800 Subject: [PATCH 011/138] btrfs: Add an assertion to warn incorrect case in insert_inline_extent() In insert_inline_extent(), the case that checks compressed_size > 0 and compressed_pages = NULL cannot occur, otherwise a null-pointer dereference may occur on line 215: cpage = 
compressed_pages[i]; To catch this incorrect case, an assertion is added. Reviewed-by: Qu Wenruo Signed-off-by: Jia-Ju Bai Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c3feb5310e17..20963b6567ae 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -176,6 +176,9 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans, size_t cur_size = size; unsigned long offset; + ASSERT((compressed_size > 0 && compressed_pages) || + (compressed_size == 0 && !compressed_pages)); + if (compressed_size && compressed_pages) cur_size = compressed_size; From d23ea3fa7dcb0d4a2c405de0879bc4ddcf521d7d Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 27 Mar 2019 16:19:55 +0100 Subject: [PATCH 012/138] btrfs: assert extent map tree lock in add_extent_mapping As add_extent_mapping is called from several functions, let's add the lock annotation. The tree is going to be modified so it must be the exclusive lock. Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/extent_map.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 9558d79faf1e..9d30acca55e1 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -384,6 +384,8 @@ int add_extent_mapping(struct extent_map_tree *tree, { int ret = 0; + lockdep_assert_held_write(&tree->lock); + ret = tree_insert(&tree->map, em); if (ret) goto out; From 73e82fe4099bbf3e06a351430bd4ed9703212dda Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 27 Mar 2019 16:19:55 +0100 Subject: [PATCH 013/138] btrfs: assert tree mod log lock in __tree_mod_log_insert The tree is going to be modified so it must be the exclusive lock. Reviewed-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 5df76c17775a..9d1d0a926cb0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -376,8 +376,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, * The 'start address' is the logical address of the *new* root node * for root replace operations, or the logical address of the affected * block for all other operations. - * - * Note: must be called with write lock for fs_info::tree_mod_log_lock. */ static noinline int __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) @@ -387,6 +385,8 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) struct rb_node *parent = NULL; struct tree_mod_elem *cur; + lockdep_assert_held_write(&fs_info->tree_mod_log_lock); + tm->seq = btrfs_inc_tree_mod_seq(fs_info); tm_root = &fs_info->tree_mod_log; From 9e3246a5f675932079cf4cc1818b7c1b02f93e36 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 19 Jul 2019 14:51:41 +0800 Subject: [PATCH 014/138] btrfs: volumes: Unexport find_free_dev_extent_start() This function is only used locally in find_free_dev_extent(), no external callers. So unexport it. 
Reviewed-by: Johannes Thumshirn Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 5 +++-- fs/btrfs/volumes.h | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 662a9d14f151..8d64279d117c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1552,8 +1552,9 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start, * But if we don't find suitable free space, it is used to store the size of * the max free space. */ -int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes, - u64 search_start, u64 *start, u64 *len) +static int find_free_dev_extent_start(struct btrfs_device *device, + u64 num_bytes, u64 search_start, u64 *start, + u64 *len) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 7f6aa1816409..c71354fe1363 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -475,8 +475,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info); int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info); int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info); int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset); -int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes, - u64 search_start, u64 *start, u64 *max_avail); int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *max_avail); void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); From 135da9766eea6134229146f6f67fdcbba41120e1 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 19 Jul 2019 14:51:42 +0800 Subject: [PATCH 015/138] btrfs: volumes: Add comment for find_free_dev_extent_start() Since commit 6df9a95e6339 ("Btrfs: make the chunk allocator completely tree lockless") we search commit root of device tree to avoid deadlock. This introduced a safety feature, find_free_dev_extent_start() won't use dev extents which just get freed in current transaction. This safety feature makes sure we won't allocate new block group using just freed dev extents to break CoW. However, this feature also makes find_free_dev_extent_start() not reliable reporting free device space. Just add such comment to make later viewer careful about this behavior. This behavior makes one caller, btrfs_can_relocate() unreliable determining the device free space. Reviewed-by: Johannes Thumshirn Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8d64279d117c..ac16734c0f44 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1551,6 +1551,12 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start, * @len is used to store the size of the free space that we find. * But if we don't find suitable free space, it is used to store the size of * the max free space. + * + * NOTE: This function will search *commit* root of device tree, and does extra + * check to ensure dev extents are not double allocated. + * This makes the function safe to allocate dev extents but may not report + * correct usable device space, as device extent freed in current transaction + * is not reported as avaiable. 
*/ static int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes, u64 search_start, u64 *start, From e91381421f87403f9fd4d3d3b0143fa14d5aa85f Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 19 Jul 2019 14:51:43 +0800 Subject: [PATCH 016/138] btrfs: extent-tree: Add comment for inc_block_group_ro() inc_block_group_ro() is only designed to mark one block group read-only, it doesn't really care if other block groups have enough free space to contain the used space in the block group. However due to the close connection between this function and relocation, sometimes we can be confused and think this function is responsible for balance space reservation, which is not true. Add some comment to make the functionality clear. Reviewed-by: Johannes Thumshirn Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8b7eb22d508a..e1a39fe183c1 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7341,6 +7341,19 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) return flags; } +/* + * Mark block group @cache read-only, so later write won't happen to block + * group @cache. + * + * If @force is not set, this function will only mark the block group readonly + * if we have enough free space (1M) in other metadata/system block groups. + * If @force is not set, this function will mark the block group readonly + * without checking free space. + * + * NOTE: This function doesn't care if other block groups can contain all the + * data in this block group. That check should be done by relocation routine, + * not this function. + */ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) { struct btrfs_space_info *sinfo = cache->space_info; @@ -7374,6 +7387,12 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) cache->bytes_super - btrfs_block_group_used(&cache->item); sinfo_used = btrfs_space_info_used(sinfo, true); + /* + * sinfo_used + num_bytes should always <= sinfo->total_bytes. + * + * Here we make sure if we mark this bg RO, we still have enough + * free space as buffer (if min_allocable_bytes is not 0). + */ if (sinfo_used + num_bytes + min_allocable_bytes <= sinfo->total_bytes) { sinfo->bytes_readonly += num_bytes; From 112974d4067ba29ae59f94e0bc79f19bf9db1a53 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 19 Jul 2019 14:51:44 +0800 Subject: [PATCH 017/138] btrfs: volumes: Remove ENOSPC-prone btrfs_can_relocate() [BUG] Test case btrfs/156 fails since commit 302167c50b32 ("btrfs: don't end the transaction for delayed refs in throttle") with ENOSPC. [CAUSE] The ENOSPC is reported from btrfs_can_relocate(). This function will check: - If this block group is empty, we can relocate - If we can enough free space, we can relocate Above checks are valid but the following check is vague due to its implementation: - If and only if we can allocated a new block group to contain all the used space, we can relocate This design itself is OK, but the way to determine if we can allocate a new block group is problematic. btrfs_can_relocate() uses find_free_dev_extent() to find free space on a device. However find_free_dev_extent() only searches commit root and excludes dev extents allocated in current trans, this makes it unable to use dev extent just freed in current transaction. 
So for the following example, btrfs_can_relocate() will report ENOSPC: The example block group layout: 1M 129M 257M 385M 513M 550M |///////|///////////|//////////| | | // = Used bg, consider all bg is 100% used for easy calculation. And all block groups are SINGLE, on-disk bytenr is the same as the logical bytenr. 1) Bg in [129M, 257M) get relocated to [385M, 513M), transid=100 1M 129M 257M 385M 513M 550M |///////| |//////////|/////////| In transid 100, bg in [129M, 257M) get relocated to [385M, 513M) However transid 100 is not committed yet, so in dev commit tree, we still have the old dev extents layout: 1M 129M 257M 385M 513M 550M |///////|///////////|//////////| | | 2) Try to relocate bg [257M, 385M) We goes into btrfs_can_relocate(), no free space in current bgs, so we check if we can find large enough free dev extents. The first slot is [385M, 513M), but that is already used by new bg at [385M, 513M), so we continue search. The remaining slot is [512M, 550M), smaller than the bg's length 128M. So btrfs_can_relocate report ENOSPC. However this is over killed, in fact if we just skip btrfs_can_relocate() check, and go into regular relocation routine, at extent reservation time, if we can't find free extent, then we fallback to commit transaction, which will free up the dev extents and allow new block group to be created. [FIX] The fix here is to remove btrfs_can_relocate() completely. If we hit the false ENOSPC case just like btrfs/156, extent allocator will push harder by committing transaction and we will have space for new block group, avoiding the false ENOSPC. If we really ran out of space, we will hit ENOSPC at relocate_block_group(), and btrfs will just reports the ENOSPC error as usual. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 - fs/btrfs/extent-tree.c | 141 ----------------------------------------- fs/btrfs/volumes.c | 4 -- 3 files changed, 146 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d2807b99aa97..6bb42460d7ff 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2701,7 +2701,6 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_free_block_groups(struct btrfs_fs_info *info); int btrfs_read_block_groups(struct btrfs_fs_info *info); -int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e1a39fe183c1..4bd88b6b4865 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7547,147 +7547,6 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) spin_unlock(&sinfo->lock); } -/* - * Checks to see if it's even possible to relocate this block group. - * - * @return - -1 if it's not a good idea to relocate this block group, 0 if its - * ok to go ahead and try. 
- */ -int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr) -{ - struct btrfs_block_group_cache *block_group; - struct btrfs_space_info *space_info; - struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; - struct btrfs_device *device; - u64 min_free; - u64 dev_min = 1; - u64 dev_nr = 0; - u64 target; - int debug; - int index; - int full = 0; - int ret = 0; - - debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG); - - block_group = btrfs_lookup_block_group(fs_info, bytenr); - - /* odd, couldn't find the block group, leave it alone */ - if (!block_group) { - if (debug) - btrfs_warn(fs_info, - "can't find block group for bytenr %llu", - bytenr); - return -1; - } - - min_free = btrfs_block_group_used(&block_group->item); - - /* no bytes used, we're good */ - if (!min_free) - goto out; - - space_info = block_group->space_info; - spin_lock(&space_info->lock); - - full = space_info->full; - - /* - * if this is the last block group we have in this space, we can't - * relocate it unless we're able to allocate a new chunk below. - * - * Otherwise, we need to make sure we have room in the space to handle - * all of the extents from this block group. If we can, we're good - */ - if ((space_info->total_bytes != block_group->key.offset) && - (btrfs_space_info_used(space_info, false) + min_free < - space_info->total_bytes)) { - spin_unlock(&space_info->lock); - goto out; - } - spin_unlock(&space_info->lock); - - /* - * ok we don't have enough space, but maybe we have free space on our - * devices to allocate new chunks for relocation, so loop through our - * alloc devices and guess if we have enough space. if this block - * group is going to be restriped, run checks against the target - * profile instead of the current one. - */ - ret = -1; - - /* - * index: - * 0: raid10 - * 1: raid1 - * 2: dup - * 3: raid0 - * 4: single - */ - target = get_restripe_target(fs_info, block_group->flags); - if (target) { - index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target)); - } else { - /* - * this is just a balance, so if we were marked as full - * we know there is no space for a new chunk - */ - if (full) { - if (debug) - btrfs_warn(fs_info, - "no space to alloc new chunk for block group %llu", - block_group->key.objectid); - goto out; - } - - index = btrfs_bg_flags_to_raid_index(block_group->flags); - } - - if (index == BTRFS_RAID_RAID10) { - dev_min = 4; - /* Divide by 2 */ - min_free >>= 1; - } else if (index == BTRFS_RAID_RAID1) { - dev_min = 2; - } else if (index == BTRFS_RAID_DUP) { - /* Multiply by 2 */ - min_free <<= 1; - } else if (index == BTRFS_RAID_RAID0) { - dev_min = fs_devices->rw_devices; - min_free = div64_u64(min_free, dev_min); - } - - mutex_lock(&fs_info->chunk_mutex); - list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { - u64 dev_offset; - - /* - * check to make sure we can actually find a chunk with enough - * space to fit our block group in. 
- */ - if (device->total_bytes > device->bytes_used + min_free && - !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { - ret = find_free_dev_extent(device, min_free, - &dev_offset, NULL); - if (!ret) - dev_nr++; - - if (dev_nr >= dev_min) - break; - - ret = -1; - } - } - if (debug && ret == -1) - btrfs_warn(fs_info, - "no space to allocate a new chunk for block group %llu", - block_group->key.objectid); - mutex_unlock(&fs_info->chunk_mutex); -out: - btrfs_put_block_group(block_group); - return ret; -} - static int find_first_block_group(struct btrfs_fs_info *fs_info, struct btrfs_path *path, struct btrfs_key *key) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ac16734c0f44..ef3e5b4f88be 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3083,10 +3083,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) */ lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); - ret = btrfs_can_relocate(fs_info, chunk_offset); - if (ret) - return -ENOSPC; - /* step one, relocate all the extents inside this chunk */ btrfs_scrub_pause(fs_info); ret = btrfs_relocate_block_group(fs_info, chunk_offset); From 933c22a7512c5c09b1fdc46b557384efe8d03233 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 16 Jul 2019 17:00:32 +0800 Subject: [PATCH 018/138] btrfs: delayed-inode: Kill the BUG_ON() in btrfs_delete_delayed_dir_index() There is one report of fuzzed image which leads to BUG_ON() in btrfs_delete_delayed_dir_index(). Although that fuzzed image can already be addressed by enhanced extent-tree error handler, it's still better to hunt down more BUG_ON(). This patch will hunt down two BUG_ON()s in btrfs_delete_delayed_dir_index(): - One for error from btrfs_delayed_item_reserve_metadata() Instead of BUG_ON(), we output an error message and free the item. And return the error. All callers of this function handles the error by aborting current trasaction. - One for possible EEXIST from __btrfs_add_delayed_deletion_item() That function can return -EEXIST. We already have a good enough error message for that, only need to clean up the reserved metadata space and allocated item. To help above cleanup, also modifiy __btrfs_remove_delayed_item() called in btrfs_release_delayed_item(), to skip unassociated item. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203253 Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/delayed-inode.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 43fdb2992956..6858a05606dd 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -474,6 +474,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) struct rb_root_cached *root; struct btrfs_delayed_root *delayed_root; + /* Not associated with any delayed_node */ + if (!delayed_item->delayed_node) + return; delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; BUG_ON(!delayed_root); @@ -1525,7 +1528,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, * we have reserved enough space when we start a new transaction, * so reserving metadata failure is impossible. 
*/ - BUG_ON(ret); + if (ret < 0) { + btrfs_err(trans->fs_info, +"metadata reservation failed for delayed dir item deltiona, should have been reserved"); + btrfs_release_delayed_item(item); + goto end; + } mutex_lock(&node->mutex); ret = __btrfs_add_delayed_deletion_item(node, item); @@ -1534,7 +1542,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)", index, node->root->root_key.objectid, node->inode_id, ret); - BUG(); + btrfs_delayed_item_release_metadata(dir->root, item); + btrfs_release_delayed_item(item); } mutex_unlock(&node->mutex); end: From 2a28468e525f3924efed7f29f2bc5a2926e7e19a Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 16 Jul 2019 17:00:33 +0800 Subject: [PATCH 019/138] btrfs: extent-tree: Make sure we only allocate extents from block groups with the same type [BUG] With fuzzed image and MIXED_GROUPS super flag, we can hit the following BUG_ON(): kernel BUG at fs/btrfs/delayed-ref.c:491! invalid opcode: 0000 [#1] PREEMPT SMP NOPTI CPU: 0 PID: 1849 Comm: sync Tainted: G O 5.2.0-custom #27 RIP: 0010:update_existing_head_ref.cold+0x44/0x46 [btrfs] Call Trace: add_delayed_ref_head+0x20c/0x2d0 [btrfs] btrfs_add_delayed_tree_ref+0x1fc/0x490 [btrfs] btrfs_free_tree_block+0x123/0x380 [btrfs] __btrfs_cow_block+0x435/0x500 [btrfs] btrfs_cow_block+0x110/0x240 [btrfs] btrfs_search_slot+0x230/0xa00 [btrfs] ? __lock_acquire+0x105e/0x1e20 btrfs_insert_empty_items+0x67/0xc0 [btrfs] alloc_reserved_file_extent+0x9e/0x340 [btrfs] __btrfs_run_delayed_refs+0x78e/0x1240 [btrfs] ? kvm_clock_read+0x18/0x30 ? __sched_clock_gtod_offset+0x21/0x50 btrfs_run_delayed_refs.part.0+0x4e/0x180 [btrfs] btrfs_run_delayed_refs+0x23/0x30 [btrfs] btrfs_commit_transaction+0x53/0x9f0 [btrfs] btrfs_sync_fs+0x7c/0x1c0 [btrfs] ? __ia32_sys_fdatasync+0x20/0x20 sync_fs_one_sb+0x23/0x30 iterate_supers+0x95/0x100 ksys_sync+0x62/0xb0 __ia32_sys_sync+0xe/0x20 do_syscall_64+0x65/0x240 entry_SYSCALL_64_after_hwframe+0x49/0xbe [CAUSE] This situation is caused by several factors: - Fuzzed image The extent tree of this fs missed one backref for extent tree root. So we can allocated space from that slot. - MIXED_BG feature Super block has MIXED_BG flag. - No mixed block groups exists All block groups are just regular ones. This makes data space_info->block_groups[] contains metadata block groups. And when we reserve space for data, we can use space in metadata block group. Then we hit the following file operations: - fallocate We need to allocate data extents. find_free_extent() choose to use the metadata block to allocate space from, and choose the space of extent tree root, since its backref is missing. This generate one delayed ref head with is_data = 1. - extent tree update We need to update extent tree at run_delayed_ref time. This generate one delayed ref head with is_data = 0, for the same bytenr of old extent tree root. Then we trigger the BUG_ON(). [FIX] The quick fix here is to check block_group->flags before using it. The problem can only happen for MIXED_GROUPS fs. Regular filesystems won't have space_info with DATA|METADATA flag, and no way to hit the bug. 
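
Editorial note, not part of the patch: a minimal userspace sketch of the invariant the fix enforces, namely that the allocator must skip any block group whose type bits do not cover the requested allocation profile. The flag values, struct and helper names below are made up for illustration and are not the kernel's definitions.

/* Toy model of "only allocate from block groups of the requested type". */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BG_DATA     (1u << 0)   /* illustrative flag values, not btrfs's */
#define BG_METADATA (1u << 1)

struct bg {
        const char *name;
        unsigned int flags;     /* type bits of this block group */
};

/* An allocation with profile 'wanted' may only use a matching group. */
static bool bg_type_matches(const struct bg *g, unsigned int wanted)
{
        return (g->flags & wanted) == wanted;
}

int main(void)
{
        const struct bg groups[] = {
                { "metadata-only", BG_METADATA },
                { "mixed",         BG_DATA | BG_METADATA },
                { "data-only",     BG_DATA },
        };
        const unsigned int wanted = BG_DATA;    /* reserving a data extent */

        for (size_t i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
                if (!bg_type_matches(&groups[i], wanted)) {
                        /* Analogous to the release-and-continue the fix adds. */
                        printf("skip %s\n", groups[i].name);
                        continue;
                }
                printf("may allocate from %s\n", groups[i].name);
        }
        return 0;
}

On a fuzzed MIXED_GROUPS image the data space_info can end up listing metadata-only block groups, so without such a skip a data reservation can land on a metadata extent and later trip the delayed-ref BUG_ON() shown above.
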
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203255 Reported-by: Jungyeon Yoon Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4bd88b6b4865..0bb095bda01b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5751,6 +5751,14 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, */ if ((flags & extra) && !(block_group->flags & extra)) goto loop; + + /* + * This block group has different flags than we want. + * It's possible that we have MIXED_GROUP flag but no + * block group is mixed. Just skip such block group. + */ + btrfs_release_block_group(block_group, delalloc); + continue; } have_block_group: From 259ee7754b6793af8bdd77f9ca818bc41cfe9541 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 16 Jul 2019 17:00:34 +0800 Subject: [PATCH 020/138] btrfs: tree-checker: Add ROOT_ITEM check This patch will introduce ROOT_ITEM check, which includes: - Key->objectid and key->offset check Currently only some easy check, e.g. 0 as rootid is invalid. - Item size check Root item size is fixed. - Generation checks Generation, generation_v2 and last_snapshot should not be greater than super generation + 1 - Level and alignment check Level should be in [0, 7], and bytenr must be aligned to sector size. - Flags check Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203261 Reported-by: Jungyeon Yoon Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 92 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index ccd5706199d7..d83adda6c090 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -821,6 +821,95 @@ static int check_inode_item(struct extent_buffer *leaf, return 0; } +static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, + int slot) +{ + struct btrfs_fs_info *fs_info = leaf->fs_info; + struct btrfs_root_item ri; + const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | + BTRFS_ROOT_SUBVOL_DEAD; + + /* No such tree id */ + if (key->objectid == 0) { + generic_err(leaf, slot, "invalid root id 0"); + return -EUCLEAN; + } + + /* + * Some older kernel may create ROOT_ITEM with non-zero offset, so here + * we only check offset for reloc tree whose key->offset must be a + * valid tree. 
+ */ + if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) { + generic_err(leaf, slot, "invalid root id 0 for reloc tree"); + return -EUCLEAN; + } + + if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) { + generic_err(leaf, slot, + "invalid root item size, have %u expect %zu", + btrfs_item_size_nr(leaf, slot), sizeof(ri)); + } + + read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), + sizeof(ri)); + + /* Generation related */ + if (btrfs_root_generation(&ri) > + btrfs_super_generation(fs_info->super_copy) + 1) { + generic_err(leaf, slot, + "invalid root generation, have %llu expect (0, %llu]", + btrfs_root_generation(&ri), + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + if (btrfs_root_generation_v2(&ri) > + btrfs_super_generation(fs_info->super_copy) + 1) { + generic_err(leaf, slot, + "invalid root v2 generation, have %llu expect (0, %llu]", + btrfs_root_generation_v2(&ri), + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + if (btrfs_root_last_snapshot(&ri) > + btrfs_super_generation(fs_info->super_copy) + 1) { + generic_err(leaf, slot, + "invalid root last_snapshot, have %llu expect (0, %llu]", + btrfs_root_last_snapshot(&ri), + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + + /* Alignment and level check */ + if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) { + generic_err(leaf, slot, + "invalid root bytenr, have %llu expect to be aligned to %u", + btrfs_root_bytenr(&ri), fs_info->sectorsize); + return -EUCLEAN; + } + if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) { + generic_err(leaf, slot, + "invalid root level, have %u expect [0, %u]", + btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1); + return -EUCLEAN; + } + if (ri.drop_level >= BTRFS_MAX_LEVEL) { + generic_err(leaf, slot, + "invalid root level, have %u expect [0, %u]", + ri.drop_level, BTRFS_MAX_LEVEL - 1); + return -EUCLEAN; + } + + /* Flags check */ + if (btrfs_root_flags(&ri) & ~valid_root_flags) { + generic_err(leaf, slot, + "invalid root flags, have 0x%llx expect mask 0x%llx", + btrfs_root_flags(&ri), valid_root_flags); + return -EUCLEAN; + } + return 0; +} + /* * Common point to switch the item-specific validation. */ @@ -856,6 +945,9 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_INODE_ITEM_KEY: ret = check_inode_item(leaf, key, slot); break; + case BTRFS_ROOT_ITEM_KEY: + ret = check_root_item(leaf, key, slot); + break; } return ret; } From 7764d56baa844d7f6206394f21a0e8c1f303c476 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 4 Jul 2019 16:24:09 +0100 Subject: [PATCH 021/138] Btrfs: fix hang when loading existing inode cache off disk If we are able to load an existing inode cache off disk, we set the state of the cache to BTRFS_CACHE_FINISHED, but we don't wake up any one waiting for the cache to be available. This means that anyone waiting for the cache to be available, waiting on the condition that either its state is BTRFS_CACHE_FINISHED or its available free space is greather than zero, can hang forever. This could be observed running fstests with MOUNT_OPTIONS="-o inode_cache", in particular test case generic/161 triggered it very frequently for me, producing a trace like the following: [63795.739712] BTRFS info (device sdc): enabling inode map caching [63795.739714] BTRFS info (device sdc): disk space caching is enabled [63795.739716] BTRFS info (device sdc): has skinny extents [64036.653886] INFO: task btrfs-transacti:3917 blocked for more than 120 seconds. 
[64036.654079] Not tainted 5.2.0-rc4-btrfs-next-50 #1 [64036.654143] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [64036.654232] btrfs-transacti D 0 3917 2 0x80004000 [64036.654239] Call Trace: [64036.654258] ? __schedule+0x3ae/0x7b0 [64036.654271] schedule+0x3a/0xb0 [64036.654325] btrfs_commit_transaction+0x978/0xae0 [btrfs] [64036.654339] ? remove_wait_queue+0x60/0x60 [64036.654395] transaction_kthread+0x146/0x180 [btrfs] [64036.654450] ? btrfs_cleanup_transaction+0x620/0x620 [btrfs] [64036.654456] kthread+0x103/0x140 [64036.654464] ? kthread_create_worker_on_cpu+0x70/0x70 [64036.654476] ret_from_fork+0x3a/0x50 [64036.654504] INFO: task xfs_io:3919 blocked for more than 120 seconds. [64036.654568] Not tainted 5.2.0-rc4-btrfs-next-50 #1 [64036.654617] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [64036.654685] xfs_io D 0 3919 3633 0x00000000 [64036.654691] Call Trace: [64036.654703] ? __schedule+0x3ae/0x7b0 [64036.654716] schedule+0x3a/0xb0 [64036.654756] btrfs_find_free_ino+0xa9/0x120 [btrfs] [64036.654764] ? remove_wait_queue+0x60/0x60 [64036.654809] btrfs_create+0x72/0x1f0 [btrfs] [64036.654822] lookup_open+0x6bc/0x790 [64036.654849] path_openat+0x3bc/0xc00 [64036.654854] ? __lock_acquire+0x331/0x1cb0 [64036.654869] do_filp_open+0x99/0x110 [64036.654884] ? __alloc_fd+0xee/0x200 [64036.654895] ? do_raw_spin_unlock+0x49/0xc0 [64036.654909] ? do_sys_open+0x132/0x220 [64036.654913] do_sys_open+0x132/0x220 [64036.654926] do_syscall_64+0x60/0x1d0 [64036.654933] entry_SYSCALL_64_after_hwframe+0x49/0xbe Fix this by adding a wake_up() call right after setting the cache state to BTRFS_CACHE_FINISHED, at start_caching(), when we are able to load the cache from disk. Fixes: 82d5902d9c681b ("Btrfs: Support reading/writing on disk free ino cache") Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/inode-map.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 2e8bb402050b..84b2c9ee52a7 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -146,6 +146,7 @@ static void start_caching(struct btrfs_root *root) spin_lock(&root->ino_cache_lock); root->ino_cache_state = BTRFS_CACHE_FINISHED; spin_unlock(&root->ino_cache_lock); + wake_up(&root->ino_cache_wait); return; } From 29d47d00e0ae61668ee0c5d90bef2893c8abbafa Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 4 Jul 2019 16:24:19 +0100 Subject: [PATCH 022/138] Btrfs: fix inode cache block reserve leak on failure to allocate data space If we failed to allocate the data extent(s) for the inode space cache, we were bailing out without releasing the previously reserved metadata. This was triggering the following warnings when unmounting a filesystem: $ cat -n fs/btrfs/inode.c (...) 9268 void btrfs_destroy_inode(struct inode *inode) 9269 { (...) 9276 WARN_ON(BTRFS_I(inode)->block_rsv.reserved); 9277 WARN_ON(BTRFS_I(inode)->block_rsv.size); (...) 9281 WARN_ON(BTRFS_I(inode)->csum_bytes); 9282 WARN_ON(BTRFS_I(inode)->defrag_bytes); (...) Several fstests test cases triggered this often, such as generic/083, generic/102, generic/172, generic/269 and generic/300 at least, producing stack traces like the following in dmesg/syslog: [82039.079546] WARNING: CPU: 2 PID: 13167 at fs/btrfs/inode.c:9276 btrfs_destroy_inode+0x203/0x270 [btrfs] (...) 
[82039.081543] CPU: 2 PID: 13167 Comm: umount Tainted: G W 5.2.0-rc4-btrfs-next-50 #1 [82039.081912] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.2-0-gf9626ccb91-prebuilt.qemu-project.org 04/01/2014 [82039.082673] RIP: 0010:btrfs_destroy_inode+0x203/0x270 [btrfs] (...) [82039.083913] RSP: 0018:ffffac0b426a7d30 EFLAGS: 00010206 [82039.084320] RAX: ffff8ddf77691158 RBX: ffff8dde29b34660 RCX: 0000000000000002 [82039.084736] RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8dde29b34660 [82039.085156] RBP: ffff8ddf5fbec000 R08: 0000000000000000 R09: 0000000000000000 [82039.085578] R10: ffffac0b426a7c90 R11: ffffffffb9aad768 R12: ffffac0b426a7db0 [82039.086000] R13: ffff8ddf5fbec0a0 R14: dead000000000100 R15: 0000000000000000 [82039.086416] FS: 00007f8db96d12c0(0000) GS:ffff8de036b00000(0000) knlGS:0000000000000000 [82039.086837] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [82039.087253] CR2: 0000000001416108 CR3: 00000002315cc001 CR4: 00000000003606e0 [82039.087672] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [82039.088089] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [82039.088504] Call Trace: [82039.088918] destroy_inode+0x3b/0x70 [82039.089340] btrfs_free_fs_root+0x16/0xa0 [btrfs] [82039.089768] btrfs_free_fs_roots+0xd8/0x160 [btrfs] [82039.090183] ? wait_for_completion+0x65/0x1a0 [82039.090607] close_ctree+0x172/0x370 [btrfs] [82039.091021] generic_shutdown_super+0x6c/0x110 [82039.091427] kill_anon_super+0xe/0x30 [82039.091832] btrfs_kill_super+0x12/0xa0 [btrfs] [82039.092233] deactivate_locked_super+0x3a/0x70 [82039.092636] cleanup_mnt+0x3b/0x80 [82039.093039] task_work_run+0x93/0xc0 [82039.093457] exit_to_usermode_loop+0xfa/0x100 [82039.093856] do_syscall_64+0x162/0x1d0 [82039.094244] entry_SYSCALL_64_after_hwframe+0x49/0xbe [82039.094634] RIP: 0033:0x7f8db8fbab37 (...) [82039.095876] RSP: 002b:00007ffdce35b468 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [82039.096290] RAX: 0000000000000000 RBX: 0000560d20b00060 RCX: 00007f8db8fbab37 [82039.096700] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000560d20b00240 [82039.097110] RBP: 0000560d20b00240 R08: 0000560d20b00270 R09: 0000000000000015 [82039.097522] R10: 00000000000006b4 R11: 0000000000000246 R12: 00007f8db94bce64 [82039.097937] R13: 0000000000000000 R14: 0000000000000000 R15: 00007ffdce35b6f0 [82039.098350] irq event stamp: 0 [82039.098750] hardirqs last enabled at (0): [<0000000000000000>] 0x0 [82039.099150] hardirqs last disabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.099545] softirqs last enabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.099925] softirqs last disabled at (0): [<0000000000000000>] 0x0 [82039.100292] ---[ end trace f2521afa616ddccc ]--- [82039.100707] WARNING: CPU: 2 PID: 13167 at fs/btrfs/inode.c:9277 btrfs_destroy_inode+0x1ac/0x270 [btrfs] (...) [82039.103050] CPU: 2 PID: 13167 Comm: umount Tainted: G W 5.2.0-rc4-btrfs-next-50 #1 [82039.103428] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.2-0-gf9626ccb91-prebuilt.qemu-project.org 04/01/2014 [82039.104203] RIP: 0010:btrfs_destroy_inode+0x1ac/0x270 [btrfs] (...) 
[82039.105461] RSP: 0018:ffffac0b426a7d30 EFLAGS: 00010206 [82039.105866] RAX: ffff8ddf77691158 RBX: ffff8dde29b34660 RCX: 0000000000000002 [82039.106270] RDX: 0000000000000000 RSI: 0000000000000001 RDI: ffff8dde29b34660 [82039.106673] RBP: ffff8ddf5fbec000 R08: 0000000000000000 R09: 0000000000000000 [82039.107078] R10: ffffac0b426a7c90 R11: ffffffffb9aad768 R12: ffffac0b426a7db0 [82039.107487] R13: ffff8ddf5fbec0a0 R14: dead000000000100 R15: 0000000000000000 [82039.107894] FS: 00007f8db96d12c0(0000) GS:ffff8de036b00000(0000) knlGS:0000000000000000 [82039.108309] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [82039.108723] CR2: 0000000001416108 CR3: 00000002315cc001 CR4: 00000000003606e0 [82039.109146] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [82039.109567] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [82039.109989] Call Trace: [82039.110405] destroy_inode+0x3b/0x70 [82039.110830] btrfs_free_fs_root+0x16/0xa0 [btrfs] [82039.111257] btrfs_free_fs_roots+0xd8/0x160 [btrfs] [82039.111675] ? wait_for_completion+0x65/0x1a0 [82039.112101] close_ctree+0x172/0x370 [btrfs] [82039.112519] generic_shutdown_super+0x6c/0x110 [82039.112988] kill_anon_super+0xe/0x30 [82039.113439] btrfs_kill_super+0x12/0xa0 [btrfs] [82039.113861] deactivate_locked_super+0x3a/0x70 [82039.114278] cleanup_mnt+0x3b/0x80 [82039.114685] task_work_run+0x93/0xc0 [82039.115083] exit_to_usermode_loop+0xfa/0x100 [82039.115476] do_syscall_64+0x162/0x1d0 [82039.115863] entry_SYSCALL_64_after_hwframe+0x49/0xbe [82039.116254] RIP: 0033:0x7f8db8fbab37 (...) [82039.117463] RSP: 002b:00007ffdce35b468 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [82039.117882] RAX: 0000000000000000 RBX: 0000560d20b00060 RCX: 00007f8db8fbab37 [82039.118330] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000560d20b00240 [82039.118743] RBP: 0000560d20b00240 R08: 0000560d20b00270 R09: 0000000000000015 [82039.119159] R10: 00000000000006b4 R11: 0000000000000246 R12: 00007f8db94bce64 [82039.119574] R13: 0000000000000000 R14: 0000000000000000 R15: 00007ffdce35b6f0 [82039.119987] irq event stamp: 0 [82039.120387] hardirqs last enabled at (0): [<0000000000000000>] 0x0 [82039.120787] hardirqs last disabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.121182] softirqs last enabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.121563] softirqs last disabled at (0): [<0000000000000000>] 0x0 [82039.121933] ---[ end trace f2521afa616ddccd ]--- [82039.122353] WARNING: CPU: 2 PID: 13167 at fs/btrfs/inode.c:9278 btrfs_destroy_inode+0x1bc/0x270 [btrfs] (...) [82039.124606] CPU: 2 PID: 13167 Comm: umount Tainted: G W 5.2.0-rc4-btrfs-next-50 #1 [82039.125008] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.2-0-gf9626ccb91-prebuilt.qemu-project.org 04/01/2014 [82039.125801] RIP: 0010:btrfs_destroy_inode+0x1bc/0x270 [btrfs] (...) 
[82039.126998] RSP: 0018:ffffac0b426a7d30 EFLAGS: 00010202 [82039.127399] RAX: ffff8ddf77691158 RBX: ffff8dde29b34660 RCX: 0000000000000002 [82039.127803] RDX: 0000000000000001 RSI: 0000000000000001 RDI: ffff8dde29b34660 [82039.128206] RBP: ffff8ddf5fbec000 R08: 0000000000000000 R09: 0000000000000000 [82039.128611] R10: ffffac0b426a7c90 R11: ffffffffb9aad768 R12: ffffac0b426a7db0 [82039.129020] R13: ffff8ddf5fbec0a0 R14: dead000000000100 R15: 0000000000000000 [82039.129428] FS: 00007f8db96d12c0(0000) GS:ffff8de036b00000(0000) knlGS:0000000000000000 [82039.129846] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [82039.130261] CR2: 0000000001416108 CR3: 00000002315cc001 CR4: 00000000003606e0 [82039.130684] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [82039.131142] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [82039.131561] Call Trace: [82039.131990] destroy_inode+0x3b/0x70 [82039.132417] btrfs_free_fs_root+0x16/0xa0 [btrfs] [82039.132844] btrfs_free_fs_roots+0xd8/0x160 [btrfs] [82039.133262] ? wait_for_completion+0x65/0x1a0 [82039.133688] close_ctree+0x172/0x370 [btrfs] [82039.134157] generic_shutdown_super+0x6c/0x110 [82039.134575] kill_anon_super+0xe/0x30 [82039.134997] btrfs_kill_super+0x12/0xa0 [btrfs] [82039.135415] deactivate_locked_super+0x3a/0x70 [82039.135832] cleanup_mnt+0x3b/0x80 [82039.136239] task_work_run+0x93/0xc0 [82039.136637] exit_to_usermode_loop+0xfa/0x100 [82039.137029] do_syscall_64+0x162/0x1d0 [82039.137418] entry_SYSCALL_64_after_hwframe+0x49/0xbe [82039.137812] RIP: 0033:0x7f8db8fbab37 (...) [82039.139059] RSP: 002b:00007ffdce35b468 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [82039.139475] RAX: 0000000000000000 RBX: 0000560d20b00060 RCX: 00007f8db8fbab37 [82039.139890] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000560d20b00240 [82039.140302] RBP: 0000560d20b00240 R08: 0000560d20b00270 R09: 0000000000000015 [82039.140719] R10: 00000000000006b4 R11: 0000000000000246 R12: 00007f8db94bce64 [82039.141138] R13: 0000000000000000 R14: 0000000000000000 R15: 00007ffdce35b6f0 [82039.141597] irq event stamp: 0 [82039.142043] hardirqs last enabled at (0): [<0000000000000000>] 0x0 [82039.142443] hardirqs last disabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.142839] softirqs last enabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.143220] softirqs last disabled at (0): [<0000000000000000>] 0x0 [82039.143588] ---[ end trace f2521afa616ddcce ]--- [82039.167472] WARNING: CPU: 3 PID: 13167 at fs/btrfs/extent-tree.c:10120 btrfs_free_block_groups+0x30d/0x460 [btrfs] (...) [82039.173800] CPU: 3 PID: 13167 Comm: umount Tainted: G W 5.2.0-rc4-btrfs-next-50 #1 [82039.174847] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.2-0-gf9626ccb91-prebuilt.qemu-project.org 04/01/2014 [82039.177031] RIP: 0010:btrfs_free_block_groups+0x30d/0x460 [btrfs] (...) 
[82039.180397] RSP: 0018:ffffac0b426a7dd8 EFLAGS: 00010206 [82039.181574] RAX: ffff8de010a1db40 RBX: ffff8de010a1db40 RCX: 0000000000170014 [82039.182711] RDX: ffff8ddff4380040 RSI: ffff8de010a1da58 RDI: 0000000000000246 [82039.183817] RBP: ffff8ddf5fbec000 R08: 0000000000000000 R09: 0000000000000000 [82039.184925] R10: ffff8de036404380 R11: ffffffffb8a5ea00 R12: ffff8de010a1b2b8 [82039.186090] R13: ffff8de010a1b2b8 R14: 0000000000000000 R15: dead000000000100 [82039.187208] FS: 00007f8db96d12c0(0000) GS:ffff8de036b80000(0000) knlGS:0000000000000000 [82039.188345] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [82039.189481] CR2: 00007fb044005170 CR3: 00000002315cc006 CR4: 00000000003606e0 [82039.190674] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [82039.191829] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [82039.192978] Call Trace: [82039.194160] close_ctree+0x19a/0x370 [btrfs] [82039.195315] generic_shutdown_super+0x6c/0x110 [82039.196486] kill_anon_super+0xe/0x30 [82039.197645] btrfs_kill_super+0x12/0xa0 [btrfs] [82039.198696] deactivate_locked_super+0x3a/0x70 [82039.199619] cleanup_mnt+0x3b/0x80 [82039.200559] task_work_run+0x93/0xc0 [82039.201505] exit_to_usermode_loop+0xfa/0x100 [82039.202436] do_syscall_64+0x162/0x1d0 [82039.203339] entry_SYSCALL_64_after_hwframe+0x49/0xbe [82039.204091] RIP: 0033:0x7f8db8fbab37 (...) [82039.206360] RSP: 002b:00007ffdce35b468 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [82039.207132] RAX: 0000000000000000 RBX: 0000560d20b00060 RCX: 00007f8db8fbab37 [82039.207906] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000560d20b00240 [82039.208621] RBP: 0000560d20b00240 R08: 0000560d20b00270 R09: 0000000000000015 [82039.209285] R10: 00000000000006b4 R11: 0000000000000246 R12: 00007f8db94bce64 [82039.209984] R13: 0000000000000000 R14: 0000000000000000 R15: 00007ffdce35b6f0 [82039.210642] irq event stamp: 0 [82039.211306] hardirqs last enabled at (0): [<0000000000000000>] 0x0 [82039.211971] hardirqs last disabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.212643] softirqs last enabled at (0): [] copy_process.part.33+0x7f2/0x1f00 [82039.213304] softirqs last disabled at (0): [<0000000000000000>] 0x0 [82039.213875] ---[ end trace f2521afa616ddccf ]--- Fix this by releasing the reserved metadata on failure to allocate data extent(s) for the inode cache. Fixes: 69fe2d75dd91d0 ("btrfs: make the delalloc block rsv per inode") Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/inode-map.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 84b2c9ee52a7..45db4fb4b959 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -486,6 +486,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, prealloc, prealloc, &alloc_hint); if (ret) { btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true); + btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true); goto out_put; } From a68ebe0790fc88b4314d17984a2cf99ce2361901 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 4 Jul 2019 16:24:32 +0100 Subject: [PATCH 023/138] Btrfs: fix inode cache waiters hanging on failure to start caching thread If we fail to start the inode caching thread, we print an error message and disable the inode cache, however we never wake up any waiters, so they hang forever waiting for the caching to finish. Fix this by waking them up and have them fallback to a call to btrfs_find_free_objectid(). 
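
Editorial note, not part of the patch: this and the neighbouring inode-cache fixes all enforce the same wait/wake contract, namely that every state transition a waiter's condition tests for must be followed by a wake-up, and that error states must be part of the condition so waiters never sleep forever. A minimal pthread sketch of that pattern, with made-up names (build with cc -pthread):

/* Illustrative userspace sketch of the wait/wake contract; not kernel code. */
#include <pthread.h>
#include <stdio.h>

enum cache_state { CACHE_STARTING, CACHE_FINISHED, CACHE_ERROR };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static enum cache_state state = CACHE_STARTING;

/* Caching thread: every transition the waiters test for ends in a broadcast. */
static void *cache_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        /* Pretend loading the on-disk cache failed. */
        state = CACHE_ERROR;            /* the transition ... */
        pthread_cond_broadcast(&cond);  /* ... and the wake-up that must follow */
        pthread_mutex_unlock(&lock);
        return NULL;
}

static int find_free_ino(void)
{
        enum cache_state s;

        pthread_mutex_lock(&lock);
        while (state == CACHE_STARTING)   /* condition must cover the error state */
                pthread_cond_wait(&cond, &lock);
        s = state;
        pthread_mutex_unlock(&lock);
        /* Fall back to a slower allocation path on error, as the patch does. */
        return s == CACHE_ERROR ? -1 : 0;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, cache_thread, NULL);
        printf("find_free_ino() -> %d\n", find_free_ino());
        pthread_join(t, NULL);
        return 0;
}
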
Fixes: e60efa84252c05 ("Btrfs: avoid triggering bug_on() when we fail to start inode caching task") Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/inode-map.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 45db4fb4b959..c242ae434355 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -13,6 +13,19 @@ #include "transaction.h" #include "delalloc-space.h" +static void fail_caching_thread(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + btrfs_warn(fs_info, "failed to start inode caching task"); + btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE, + "disabling inode map caching"); + spin_lock(&root->ino_cache_lock); + root->ino_cache_state = BTRFS_CACHE_ERROR; + spin_unlock(&root->ino_cache_lock); + wake_up(&root->ino_cache_wait); +} + static int caching_kthread(void *data) { struct btrfs_root *root = data; @@ -165,11 +178,8 @@ static void start_caching(struct btrfs_root *root) tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu", root->root_key.objectid); - if (IS_ERR(tsk)) { - btrfs_warn(fs_info, "failed to start inode caching task"); - btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE, - "disabling inode map caching"); - } + if (IS_ERR(tsk)) + fail_caching_thread(root); } int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) @@ -187,11 +197,14 @@ int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) wait_event(root->ino_cache_wait, root->ino_cache_state == BTRFS_CACHE_FINISHED || + root->ino_cache_state == BTRFS_CACHE_ERROR || root->free_ino_ctl->free_space > 0); if (root->ino_cache_state == BTRFS_CACHE_FINISHED && root->free_ino_ctl->free_space == 0) return -ENOSPC; + else if (root->ino_cache_state == BTRFS_CACHE_ERROR) + return btrfs_find_free_objectid(root, objectid); else goto again; } From 9d123a35d7e97bb2139747b16127c9b22b6a593e Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 4 Jul 2019 16:24:44 +0100 Subject: [PATCH 024/138] Btrfs: fix inode cache waiters hanging on path allocation failure If the caching thread fails to allocate a path, it returns without waking up any cache waiters, leaving them hang forever. Fix this by following the same approach as when we fail to start the caching thread: print an error message, disable inode caching and make the wakers fallback to non-caching mode behaviour (calling btrfs_find_free_objectid()). Fixes: 581bb050941b4f ("Btrfs: Cache free inode numbers in memory") Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/inode-map.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c242ae434355..c692c8b6e371 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -42,8 +42,10 @@ static int caching_kthread(void *data) return 0; path = btrfs_alloc_path(); - if (!path) + if (!path) { + fail_caching_thread(root); return -ENOMEM; + } /* Since the commit root is read-only, we can safely skip locking. 
*/ path->skip_locking = 1; From 32e534402ad52e9f35c32be68e25213db722892b Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 4 Jul 2019 16:25:00 +0100 Subject: [PATCH 025/138] Btrfs: wake up inode cache waiters sooner to reduce waiting time If we need to start an inode caching thread, because none currently exists on disk, we can wake up all waiters as soon as we mark the range starting at root's highest objectid + 1 and ending at BTRFS_LAST_FREE_OBJECTID as free, so that they don't need to wait for the caching thread to start and do some progress. We follow the same approach within the caching thread, since as soon as it finds a free range and marks it as free space in the cache, it wakes up all waiters. So improve this by adding such a wakeup call after marking that initial range as free space. Fixes: a47d6b70e28040 ("Btrfs: setup free ino caching in a more asynchronous way") Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/inode-map.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c692c8b6e371..86031cdfc356 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -176,6 +176,7 @@ static void start_caching(struct btrfs_root *root) if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) { __btrfs_add_free_space(fs_info, ctl, objectid, BTRFS_LAST_FREE_OBJECTID - objectid + 1); + wake_up(&root->ino_cache_wait); } tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu", From e678934cbe5f026c2765a1da651e61daa5724fb3 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 23 May 2019 14:51:26 +0300 Subject: [PATCH 026/138] btrfs: Remove unnecessary check from join_running_log_trans join_running_log_trans checks btrfs_root::log_root outside of btrfs_root::log_mutex to avoid contention on the mutex. Turns out this check is not necessary because the two callers of join_running_log_trans (both of which deal with removing entries from the tree-log during unlink) explicitly check whether the respective inode has been logged in the current transaction. If it hasn't then it won't have any items in the tree-log and call path will return before calling join_running_log_trans. If the check passes, however, then it's guaranteed that btrfs_root::log_root is set because the inode is logged. Those guarantees allows us to remove the speculative as well as the implicity and tricky memory barrier. Reviewed-by: Filipe Manana Signed-off-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6c8297bcfeb7..188e6f79ab4e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -188,10 +188,6 @@ static int join_running_log_trans(struct btrfs_root *root) { int ret = -ENOENT; - smp_mb(); - if (!root->log_root) - return -ENOENT; - mutex_lock(&root->log_mutex); if (root->log_root) { ret = 0; From 559ca6ea695e3dedc7b6a11ce31fcc6eac398797 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 3 Jul 2019 15:32:59 +0300 Subject: [PATCH 027/138] btrfs: Refactor btrfs_calc_avail_data_space Simplify the code by removing variables that don't bring any real value as well as simplifying the checks when buidling the candidate list of devices. No functional changes. 
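
Editorial note, not part of the patch: the refactored per-device check keeps the original arithmetic — round the device's free bytes down to the stripe length, skip the device unless more than 1 MiB plus the minimum stripe size remains, then subtract the 1 MiB kept clear for the superblock area. A standalone sketch with illustrative numbers (the 64 KiB stripe length and 1 GiB minimum stripe size are assumptions for the example, not values taken from this patch):

/* Standalone model of the per-device available-space check; constants and
 * helper names are illustrative, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN      (64 * 1024ULL)   /* assumed stripe length */
#define SZ_1M           (1024 * 1024ULL)

static uint64_t rounddown_u64(uint64_t x, uint64_t align)
{
        return x - (x % align);
}

/* Returns usable bytes on one device, or 0 if the device is skipped. */
static uint64_t device_avail(uint64_t total, uint64_t used,
                             uint64_t min_stripe_size)
{
        uint64_t avail = rounddown_u64(total - used, STRIPE_LEN);

        /* Must leave at least min_stripe_size after excluding the first 1 MiB. */
        if (avail <= SZ_1M + min_stripe_size)
                return 0;
        return avail - SZ_1M;
}

int main(void)
{
        /* 10 GiB device, 2 GiB used, assumed 1 GiB minimum stripe size. */
        uint64_t usable = device_avail(10ULL << 30, 2ULL << 30, 1ULL << 30);

        printf("usable for data chunks: %llu MiB\n",
               (unsigned long long)(usable / SZ_1M));
        return 0;
}
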
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/super.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 78de9d5d80c6..10bc7e6cca75 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1899,11 +1899,10 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, struct btrfs_device_info *devices_info; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; - u64 skip_space; u64 type; u64 avail_space; u64 min_stripe_size; - int min_stripes, num_stripes = 1; + int num_stripes = 1; int i = 0, nr_devices; const struct btrfs_raid_attr *rattr; @@ -1930,7 +1929,6 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, /* calc min stripe number for data space allocation */ type = btrfs_data_alloc_profile(fs_info); rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)]; - min_stripes = rattr->devs_min; if (type & BTRFS_BLOCK_GROUP_RAID0) num_stripes = nr_devices; @@ -1956,28 +1954,21 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, avail_space = device->total_bytes - device->bytes_used; /* align with stripe_len */ - avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN); - avail_space *= BTRFS_STRIPE_LEN; + avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN); /* * In order to avoid overwriting the superblock on the drive, * btrfs starts at an offset of at least 1MB when doing chunk * allocation. + * + * This ensures we have at least min_stripe_size free space + * after excluding 1MB. */ - skip_space = SZ_1M; - - /* - * we can use the free space in [0, skip_space - 1], subtract - * it from the total. - */ - if (avail_space && avail_space >= skip_space) - avail_space -= skip_space; - else - avail_space = 0; - - if (avail_space < min_stripe_size) + if (avail_space <= SZ_1M + min_stripe_size) continue; + avail_space -= SZ_1M; + devices_info[i].dev = device; devices_info[i].max_avail = avail_space; @@ -1991,9 +1982,8 @@ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, i = nr_devices - 1; avail_space = 0; - while (nr_devices >= min_stripes) { - if (num_stripes > nr_devices) - num_stripes = nr_devices; + while (nr_devices >= rattr->devs_min) { + num_stripes = min(num_stripes, nr_devices); if (devices_info[i].max_avail >= min_stripe_size) { int j; From b64119b5f0438da68a0927e4200c70d7bf8fbb60 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 2 Jul 2019 15:23:07 +0100 Subject: [PATCH 028/138] Btrfs: remove unnecessary condition in btrfs_clone() to avoid too much nesting The bulk of the work done when cloning extents, at ioctl.c:btrfs_clone(), is done inside an if statement that checks if the found key has the type BTRFS_EXTENT_DATA_KEY. That if statement is redundant however, because btrfs_search_slot() always leaves us in a leaf slot that points to a key that is always greater then or equals to the search key, and our search key here has that type, and because we bail out before that if statement if the key at the given leaf slot is greater then BTRFS_EXTENT_DATA_KEY. Therefore just remove that if statement, not only because it is useless but mostly because it increases the nesting/indentation level in this function which is quite big and makes things a bit awkward whenever I need to fix something that requires changing btrfs_clone() (and it has been like that for many years already). 
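
Editorial note, not part of the patch: the shape of this refactor is the common one of replacing a branch that earlier checks already guarantee with an assertion, which removes an indentation level from a long body. A generic sketch of that pattern (not btrfs code, names made up):

#include <assert.h>
#include <stdio.h>

enum key_type { KEY_OTHER, KEY_EXTENT_DATA };

static void process(enum key_type type, int value)
{
        /* Callers bail out earlier for any other key type, so this cannot be
         * false; asserting it documents the invariant without wrapping the
         * long body that follows in another if-block. */
        assert(type == KEY_EXTENT_DATA);
        printf("processing extent data item %d\n", value);
}

int main(void)
{
        process(KEY_EXTENT_DATA, 42);
        return 0;
}
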
Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 288 ++++++++++++++++++++++------------------------- 1 file changed, 137 insertions(+), 151 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9eaf78d7b8eb..ccac62d40dd2 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3516,6 +3516,14 @@ static int btrfs_clone(struct inode *src, struct inode *inode, while (1) { u64 next_key_min_offset = key.offset + 1; + struct btrfs_file_extent_item *extent; + int type; + u32 size; + struct btrfs_key new_key; + u64 disko = 0, diskl = 0; + u64 datao = 0, datal = 0; + u8 comp; + u64 drop_start; /* * note the key will change type as we walk through the @@ -3556,169 +3564,147 @@ static int btrfs_clone(struct inode *src, struct inode *inode, key.objectid != btrfs_ino(BTRFS_I(src))) break; - if (key.type == BTRFS_EXTENT_DATA_KEY) { - struct btrfs_file_extent_item *extent; - int type; - u32 size; - struct btrfs_key new_key; - u64 disko = 0, diskl = 0; - u64 datao = 0, datal = 0; - u8 comp; - u64 drop_start; + ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); - extent = btrfs_item_ptr(leaf, slot, - struct btrfs_file_extent_item); - comp = btrfs_file_extent_compression(leaf, extent); - type = btrfs_file_extent_type(leaf, extent); - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - disko = btrfs_file_extent_disk_bytenr(leaf, - extent); - diskl = btrfs_file_extent_disk_num_bytes(leaf, - extent); - datao = btrfs_file_extent_offset(leaf, extent); - datal = btrfs_file_extent_num_bytes(leaf, - extent); - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - /* take upper bound, may be compressed */ - datal = btrfs_file_extent_ram_bytes(leaf, - extent); - } + extent = btrfs_item_ptr(leaf, slot, + struct btrfs_file_extent_item); + comp = btrfs_file_extent_compression(leaf, extent); + type = btrfs_file_extent_type(leaf, extent); + if (type == BTRFS_FILE_EXTENT_REG || + type == BTRFS_FILE_EXTENT_PREALLOC) { + disko = btrfs_file_extent_disk_bytenr(leaf, extent); + diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); + datao = btrfs_file_extent_offset(leaf, extent); + datal = btrfs_file_extent_num_bytes(leaf, extent); + } else if (type == BTRFS_FILE_EXTENT_INLINE) { + /* Take upper bound, may be compressed */ + datal = btrfs_file_extent_ram_bytes(leaf, extent); + } + + /* + * The first search might have left us at an extent item that + * ends before our target range's start, can happen if we have + * holes and NO_HOLES feature enabled. + */ + if (key.offset + datal <= off) { + path->slots[0]++; + goto process_slot; + } else if (key.offset >= off + len) { + break; + } + next_key_min_offset = key.offset + datal; + size = btrfs_item_size_nr(leaf, slot); + read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), + size); + + btrfs_release_path(path); + path->leave_spinning = 0; + + memcpy(&new_key, &key, sizeof(new_key)); + new_key.objectid = btrfs_ino(BTRFS_I(inode)); + if (off <= key.offset) + new_key.offset = key.offset + destoff - off; + else + new_key.offset = destoff; + + /* + * Deal with a hole that doesn't have an extent item that + * represents it (NO_HOLES feature enabled). + * This hole is either in the middle of the cloning range or at + * the beginning (fully overlaps it or partially overlaps it). 
+ */ + if (new_key.offset != last_dest_end) + drop_start = last_dest_end; + else + drop_start = new_key.offset; + + if (type == BTRFS_FILE_EXTENT_REG || + type == BTRFS_FILE_EXTENT_PREALLOC) { + struct btrfs_clone_extent_info clone_info; /* - * The first search might have left us at an extent - * item that ends before our target range's start, can - * happen if we have holes and NO_HOLES feature enabled. + * a | --- range to clone ---| b + * | ------------- extent ------------- | */ - if (key.offset + datal <= off) { - path->slots[0]++; - goto process_slot; - } else if (key.offset >= off + len) { - break; + + /* Subtract range b */ + if (key.offset + datal > off + len) + datal = off + len - key.offset; + + /* Subtract range a */ + if (off > key.offset) { + datao += off - key.offset; + datal -= off - key.offset; } - next_key_min_offset = key.offset + datal; - size = btrfs_item_size_nr(leaf, slot); - read_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - btrfs_release_path(path); - path->leave_spinning = 0; - - memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = btrfs_ino(BTRFS_I(inode)); - if (off <= key.offset) - new_key.offset = key.offset + destoff - off; - else - new_key.offset = destoff; - - /* - * Deal with a hole that doesn't have an extent item - * that represents it (NO_HOLES feature enabled). - * This hole is either in the middle of the cloning - * range or at the beginning (fully overlaps it or - * partially overlaps it). - */ - if (new_key.offset != last_dest_end) - drop_start = last_dest_end; - else - drop_start = new_key.offset; - - if (type == BTRFS_FILE_EXTENT_REG || - type == BTRFS_FILE_EXTENT_PREALLOC) { - struct btrfs_clone_extent_info clone_info; - - /* - * a | --- range to clone ---| b - * | ------------- extent ------------- | - */ - - /* subtract range b */ - if (key.offset + datal > off + len) - datal = off + len - key.offset; - - /* subtract range a */ - if (off > key.offset) { - datao += off - key.offset; - datal -= off - key.offset; - } - - clone_info.disk_offset = disko; - clone_info.disk_len = diskl; - clone_info.data_offset = datao; - clone_info.data_len = datal; - clone_info.file_offset = new_key.offset; - clone_info.extent_buf = buf; - clone_info.item_size = size; - ret = btrfs_punch_hole_range(inode, path, + clone_info.disk_offset = disko; + clone_info.disk_len = diskl; + clone_info.data_offset = datao; + clone_info.data_len = datal; + clone_info.file_offset = new_key.offset; + clone_info.extent_buf = buf; + clone_info.item_size = size; + ret = btrfs_punch_hole_range(inode, path, drop_start, new_key.offset + datal - 1, &clone_info, &trans); - if (ret) - goto out; - } else if (type == BTRFS_FILE_EXTENT_INLINE) { - u64 skip = 0; - u64 trim = 0; - - if (off > key.offset) { - skip = off - key.offset; - new_key.offset += skip; - } - - if (key.offset + datal > off + len) - trim = key.offset + datal - (off + len); - - if (comp && (skip || trim)) { - ret = -EINVAL; - goto out; - } - size -= skip + trim; - datal -= skip + trim; - - /* - * If our extent is inline, we know we will drop - * or adjust at most 1 extent item in the - * destination root. 
- * - * 1 - adjusting old extent (we may have to - * split it) - * 1 - add new extent - * 1 - inode update - */ - trans = btrfs_start_transaction(root, 3); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out; - } - - ret = clone_copy_inline_extent(inode, - trans, path, - &new_key, - drop_start, - datal, - skip, size, buf); - if (ret) { - if (ret != -EOPNOTSUPP) - btrfs_abort_transaction(trans, - ret); - btrfs_end_transaction(trans); - goto out; - } - } - - btrfs_release_path(path); - - last_dest_end = ALIGN(new_key.offset + datal, - fs_info->sectorsize); - ret = clone_finish_inode_update(trans, inode, - last_dest_end, - destoff, olen, - no_time_update); if (ret) goto out; - if (new_key.offset + datal >= destoff + len) - break; + } else if (type == BTRFS_FILE_EXTENT_INLINE) { + u64 skip = 0; + u64 trim = 0; + + if (off > key.offset) { + skip = off - key.offset; + new_key.offset += skip; + } + + if (key.offset + datal > off + len) + trim = key.offset + datal - (off + len); + + if (comp && (skip || trim)) { + ret = -EINVAL; + goto out; + } + size -= skip + trim; + datal -= skip + trim; + + /* + * If our extent is inline, we know we will drop or + * adjust at most 1 extent item in the destination root. + * + * 1 - adjusting old extent (we may have to split it) + * 1 - add new extent + * 1 - inode update + */ + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out; + } + + ret = clone_copy_inline_extent(inode, trans, path, + &new_key, drop_start, + datal, skip, size, buf); + if (ret) { + if (ret != -EOPNOTSUPP) + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + goto out; + } } + + btrfs_release_path(path); + + last_dest_end = ALIGN(new_key.offset + datal, + fs_info->sectorsize); + ret = clone_finish_inode_update(trans, inode, last_dest_end, + destoff, olen, no_time_update); + if (ret) + goto out; + if (new_key.offset + datal >= destoff + len) + break; + btrfs_release_path(path); key.offset = next_key_min_offset; From adf4c0c53a987205b95513a160c11b9600e31220 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 19:53:02 +0200 Subject: [PATCH 029/138] btrfs: remove unused btrfs_device::flush_bio_sent The status of flush bio is tracked as a status bit, changed in commit 1c3063b6dbfa ("btrfs: cleanup device states define BTRFS_DEV_STATE_FLUSH_SENT"), the flush_bio_sent was forgotten. Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/volumes.h | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index c71354fe1363..081cb734a239 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -82,7 +82,6 @@ struct btrfs_device { unsigned long dev_state; blk_status_t last_flush_error; - int flush_bio_sent; #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED seqcount_t data_seqcount; From 82253cb6863ccada8df5a9548b35c5d5a12b90af Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 19:53:04 +0200 Subject: [PATCH 030/138] btrfs: remove unused key type set/get helpers The switch to open coded set/get has happend long time ago in 962a298f3511 ("btrfs: kill the key type accessor helpers"), remove the stray helpers. 
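For readers who do not remember the old helpers, "open coded" simply means touching the key members directly instead of going through an accessor. A tiny illustrative fragment, with simplified stand-in definitions so it compiles on its own (hypothetical caller, not taken from the tree):

#include <stdint.h>

/* Simplified stand-ins, matching the member layout used for ordering. */
struct btrfs_key {
    uint64_t objectid;
    uint8_t type;
    uint64_t offset;
};
#define BTRFS_EXTENT_DATA_KEY 108

static void init_extent_key(struct btrfs_key *key, uint64_t ino, uint64_t start)
{
    key->objectid = ino;
    key->type = BTRFS_EXTENT_DATA_KEY;    /* was: btrfs_set_key_type(key, ...) */
    key->offset = start;
}

static int is_file_extent(const struct btrfs_key *key)
{
    return key->type == BTRFS_EXTENT_DATA_KEY;    /* was: btrfs_key_type(key) == ... */
}

int main(void)
{
    struct btrfs_key key;

    init_extent_key(&key, 257, 0);
    return !is_file_extent(&key);
}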
Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6bb42460d7ff..88042497dbec 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2069,16 +2069,6 @@ static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb, btrfs_disk_key_to_cpu(key, &disk_key); } -static inline u8 btrfs_key_type(const struct btrfs_key *key) -{ - return key->type; -} - -static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) -{ - key->type = val; -} - /* struct btrfs_header */ BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, From e13976cf120307867206ef8e60545fe99019c963 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 14:50:30 +0200 Subject: [PATCH 031/138] btrfs: tree-log: convert defines to enums Used only for in-memory state tracking. Reviewed-by: Nikolay Borisov Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 188e6f79ab4e..0b85b2ffbdfa 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -24,10 +24,12 @@ * LOG_INODE_EXISTS means to log just enough to recreate the inode * during log replay */ -#define LOG_INODE_ALL 0 -#define LOG_INODE_EXISTS 1 -#define LOG_OTHER_INODE 2 -#define LOG_OTHER_INODE_ALL 3 +enum { + LOG_INODE_ALL, + LOG_INODE_EXISTS, + LOG_OTHER_INODE, + LOG_OTHER_INODE_ALL, +}; /* * directory trouble cases @@ -81,10 +83,12 @@ * The last stage is to deal with directories and links and extents * and all the other fun semantics */ -#define LOG_WALK_PIN_ONLY 0 -#define LOG_WALK_REPLAY_INODES 1 -#define LOG_WALK_REPLAY_DIR_INDEX 2 -#define LOG_WALK_REPLAY_ALL 3 +enum { + LOG_WALK_PIN_ONLY, + LOG_WALK_REPLAY_INODES, + LOG_WALK_REPLAY_DIR_INDEX, + LOG_WALK_REPLAY_ALL, +}; static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_inode *inode, From f64ce7b84c471c5af97697e539a7b4babd73a780 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 14:50:33 +0200 Subject: [PATCH 032/138] btrfs: async-thread: convert defines to enums Reviewed-by: Nikolay Borisov Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 122cb97c7909..2e9e13ffbd08 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -12,9 +12,11 @@ #include "async-thread.h" #include "ctree.h" -#define WORK_DONE_BIT 0 -#define WORK_ORDER_DONE_BIT 1 -#define WORK_HIGH_PRIO_BIT 2 +enum { + WORK_DONE_BIT, + WORK_ORDER_DONE_BIT, + WORK_HIGH_PRIO_BIT, +}; #define NO_THRESHOLD (-1) #define DFT_THRESHOLD (32) From 430a662602ea7f8db53ece65899ee35dad56c671 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 14:50:35 +0200 Subject: [PATCH 033/138] btrfs: tree-log: use symbolic name for first replay stage Reviewed-by: Nikolay Borisov Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 0b85b2ffbdfa..19a4b9dc669f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -6233,7 +6233,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) struct 
btrfs_fs_info *fs_info = log_root_tree->fs_info; struct walk_control wc = { .process_func = process_one_buffer, - .stage = 0, + .stage = LOG_WALK_PIN_ONLY, }; path = btrfs_alloc_path(); From 478b4d9f0105e33cae34445d5ad2eb9798628231 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:43 -0400 Subject: [PATCH 034/138] btrfs: move btrfs_add_free_space out of a header file This is prep work for moving block_group_cache around. Having this in the header file makes the header file include need to be in a certain order, which is awkward, so just move it into free-space-cache.c and then we can re-arrange later. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/free-space-cache.c | 8 ++++++++ fs/btrfs/free-space-cache.h | 10 ++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 062be9dde4c6..92cb06dd94d3 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2376,6 +2376,14 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, return ret; } +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size) +{ + return __btrfs_add_free_space(block_group->fs_info, + block_group->free_space_ctl, + bytenr, size); +} + int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 8760acb55ffd..2205a4113ef3 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -73,14 +73,8 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, struct btrfs_free_space_ctl *ctl, u64 bytenr, u64 size); -static inline int -btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 bytenr, u64 size) -{ - return __btrfs_add_free_space(block_group->fs_info, - block_group->free_space_ctl, - bytenr, size); -} +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); From aac0023c2106952538414254960c51dcf0dc39e9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:44 -0400 Subject: [PATCH 035/138] btrfs: move basic block_group definitions to their own header This is prep work for moving all of the block group cache code into its own file. 
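The motivation in the last two patches boils down to one C pattern: a header that defines a static inline helper needs the full definition of every type the helper touches, which forces a particular include order on all of its users, whereas a forward declaration plus an out-of-line function removes that constraint. A minimal two-file sketch with made-up names (foo, bar and bar_count() are hypothetical, not btrfs code):

/* foo.h: widely included; it no longer needs bar's full definition. */
#ifndef FOO_H
#define FOO_H

struct bar;                 /* forward declaration is enough ...       */
int foo_add(struct bar *b); /* ... once the helper is no longer inline */

#endif /* FOO_H */

/* foo.c: only this file needs the real definition, so it alone includes
 * bar.h and the include order in every other header stops mattering.
 */
#include "bar.h"
#include "foo.h"

int foo_add(struct bar *b)
{
    return bar_count(b) + 1;    /* bar_count() is hypothetical as well */
}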
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ minor comment updates ] Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 154 +++++++++++++++++++++++++ fs/btrfs/ctree.h | 148 +----------------------- fs/btrfs/disk-io.c | 1 + fs/btrfs/extent-tree.c | 1 + fs/btrfs/free-space-cache.c | 1 + fs/btrfs/free-space-tree.c | 1 + fs/btrfs/free-space-tree.h | 2 + fs/btrfs/inode.c | 1 + fs/btrfs/ioctl.c | 1 + fs/btrfs/qgroup.c | 2 +- fs/btrfs/reada.c | 1 + fs/btrfs/relocation.c | 1 + fs/btrfs/scrub.c | 1 + fs/btrfs/space-info.c | 1 + fs/btrfs/super.c | 1 + fs/btrfs/sysfs.c | 1 + fs/btrfs/tests/btrfs-tests.c | 1 + fs/btrfs/tests/free-space-tests.c | 1 + fs/btrfs/tests/free-space-tree-tests.c | 1 + fs/btrfs/transaction.c | 1 + fs/btrfs/volumes.c | 1 + 21 files changed, 175 insertions(+), 148 deletions(-) create mode 100644 fs/btrfs/block-group.h diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h new file mode 100644 index 000000000000..054745007519 --- /dev/null +++ b/fs/btrfs/block-group.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef BTRFS_BLOCK_GROUP_H +#define BTRFS_BLOCK_GROUP_H + +enum btrfs_disk_cache_state { + BTRFS_DC_WRITTEN, + BTRFS_DC_ERROR, + BTRFS_DC_CLEAR, + BTRFS_DC_SETUP, +}; + +struct btrfs_caching_control { + struct list_head list; + struct mutex mutex; + wait_queue_head_t wait; + struct btrfs_work work; + struct btrfs_block_group_cache *block_group; + u64 progress; + refcount_t count; +}; + +/* Once caching_thread() finds this much free space, it will wake up waiters. */ +#define CACHING_CTL_WAKE_UP SZ_2M + +struct btrfs_block_group_cache { + struct btrfs_key key; + struct btrfs_block_group_item item; + struct btrfs_fs_info *fs_info; + struct inode *inode; + spinlock_t lock; + u64 pinned; + u64 reserved; + u64 delalloc_bytes; + u64 bytes_super; + u64 flags; + u64 cache_generation; + + /* + * If the free space extent count exceeds this number, convert the block + * group to bitmaps. + */ + u32 bitmap_high_thresh; + + /* + * If the free space extent count drops below this number, convert the + * block group back to extents. + */ + u32 bitmap_low_thresh; + + /* + * It is just used for the delayed data space allocation because + * only the data space allocation and the relative metadata update + * can be done cross the transaction. + */ + struct rw_semaphore data_rwsem; + + /* For raid56, this is a full stripe, without parity */ + unsigned long full_stripe_len; + + unsigned int ro; + unsigned int iref:1; + unsigned int has_caching_ctl:1; + unsigned int removed:1; + + int disk_cache_state; + + /* Cache tracking stuff */ + int cached; + struct btrfs_caching_control *caching_ctl; + u64 last_byte_to_unpin; + + struct btrfs_space_info *space_info; + + /* Free space cache stuff */ + struct btrfs_free_space_ctl *free_space_ctl; + + /* Block group cache stuff */ + struct rb_node cache_node; + + /* For block groups in the same raid type */ + struct list_head list; + + /* Usage count */ + atomic_t count; + + /* + * List of struct btrfs_free_clusters for this block group. 
+ * Today it will only have one thing on it, but that may change + */ + struct list_head cluster_list; + + /* For delayed block group creation or deletion of empty block groups */ + struct list_head bg_list; + + /* For read-only block groups */ + struct list_head ro_list; + + atomic_t trimming; + + /* For dirty block groups */ + struct list_head dirty_list; + struct list_head io_list; + + struct btrfs_io_ctl io_ctl; + + /* + * Incremented when doing extent allocations and holding a read lock + * on the space_info's groups_sem semaphore. + * Decremented when an ordered extent that represents an IO against this + * block group's range is created (after it's added to its inode's + * root's list of ordered extents) or immediately after the allocation + * if it's a metadata extent or fallocate extent (for these cases we + * don't create ordered extents). + */ + atomic_t reservations; + + /* + * Incremented while holding the spinlock *lock* by a task checking if + * it can perform a nocow write (incremented if the value for the *ro* + * field is 0). Decremented by such tasks once they create an ordered + * extent or before that if some error happens before reaching that step. + * This is to prevent races between block group relocation and nocow + * writes through direct IO. + */ + atomic_t nocow_writers; + + /* Lock for free space tree operations. */ + struct mutex free_space_lock; + + /* + * Does the block group need to be added to the free space tree? + * Protected by free_space_lock. + */ + int needs_free_space; + + /* Record locked full stripes for RAID5/6 block group */ + struct btrfs_full_stripe_locks_tree full_stripe_locks_root; +}; + +#ifdef CONFIG_BTRFS_DEBUG +static inline int btrfs_should_fragment_free_space( + struct btrfs_block_group_cache *block_group) +{ + struct btrfs_fs_info *fs_info = block_group->fs_info; + + return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) && + block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || + (btrfs_test_opt(fs_info, FRAGMENT_DATA) && + block_group->flags & BTRFS_BLOCK_GROUP_DATA); +} +#endif + +#endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 88042497dbec..e95fdd1d9dd2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -39,6 +39,7 @@ struct btrfs_transaction; struct btrfs_pending_snapshot; struct btrfs_delayed_ref_root; struct btrfs_space_info; +struct btrfs_block_group_cache; extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; @@ -439,26 +440,6 @@ enum btrfs_caching_type { BTRFS_CACHE_ERROR, }; -enum btrfs_disk_cache_state { - BTRFS_DC_WRITTEN, - BTRFS_DC_ERROR, - BTRFS_DC_CLEAR, - BTRFS_DC_SETUP, -}; - -struct btrfs_caching_control { - struct list_head list; - struct mutex mutex; - wait_queue_head_t wait; - struct btrfs_work work; - struct btrfs_block_group_cache *block_group; - u64 progress; - refcount_t count; -}; - -/* Once caching_thread() finds this much free space, it will wake up waiters. 
*/ -#define CACHING_CTL_WAKE_UP SZ_2M - struct btrfs_io_ctl { void *cur, *orig; struct page *page; @@ -481,120 +462,6 @@ struct btrfs_full_stripe_locks_tree { struct mutex lock; }; -struct btrfs_block_group_cache { - struct btrfs_key key; - struct btrfs_block_group_item item; - struct btrfs_fs_info *fs_info; - struct inode *inode; - spinlock_t lock; - u64 pinned; - u64 reserved; - u64 delalloc_bytes; - u64 bytes_super; - u64 flags; - u64 cache_generation; - - /* - * If the free space extent count exceeds this number, convert the block - * group to bitmaps. - */ - u32 bitmap_high_thresh; - - /* - * If the free space extent count drops below this number, convert the - * block group back to extents. - */ - u32 bitmap_low_thresh; - - /* - * It is just used for the delayed data space allocation because - * only the data space allocation and the relative metadata update - * can be done cross the transaction. - */ - struct rw_semaphore data_rwsem; - - /* for raid56, this is a full stripe, without parity */ - unsigned long full_stripe_len; - - unsigned int ro; - unsigned int iref:1; - unsigned int has_caching_ctl:1; - unsigned int removed:1; - - int disk_cache_state; - - /* cache tracking stuff */ - int cached; - struct btrfs_caching_control *caching_ctl; - u64 last_byte_to_unpin; - - struct btrfs_space_info *space_info; - - /* free space cache stuff */ - struct btrfs_free_space_ctl *free_space_ctl; - - /* block group cache stuff */ - struct rb_node cache_node; - - /* for block groups in the same raid type */ - struct list_head list; - - /* usage count */ - atomic_t count; - - /* List of struct btrfs_free_clusters for this block group. - * Today it will only have one thing on it, but that may change - */ - struct list_head cluster_list; - - /* For delayed block group creation or deletion of empty block groups */ - struct list_head bg_list; - - /* For read-only block groups */ - struct list_head ro_list; - - atomic_t trimming; - - /* For dirty block groups */ - struct list_head dirty_list; - struct list_head io_list; - - struct btrfs_io_ctl io_ctl; - - /* - * Incremented when doing extent allocations and holding a read lock - * on the space_info's groups_sem semaphore. - * Decremented when an ordered extent that represents an IO against this - * block group's range is created (after it's added to its inode's - * root's list of ordered extents) or immediately after the allocation - * if it's a metadata extent or fallocate extent (for these cases we - * don't create ordered extents). - */ - atomic_t reservations; - - /* - * Incremented while holding the spinlock *lock* by a task checking if - * it can perform a nocow write (incremented if the value for the *ro* - * field is 0). Decremented by such tasks once they create an ordered - * extent or before that if some error happens before reaching that step. - * This is to prevent races between block group relocation and nocow - * writes through direct IO. - */ - atomic_t nocow_writers; - - /* Lock for free space tree operations. */ - struct mutex free_space_lock; - - /* - * Does the block group need to be added to the free space tree? - * Protected by free_space_lock. 
- */ - int needs_free_space; - - /* Record locked full stripes for RAID5/6 block group */ - struct btrfs_full_stripe_locks_tree full_stripe_locks_root; -}; - /* delayed seq elem */ struct seq_list { struct list_head list; @@ -1387,19 +1254,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info) btrfs_clear_opt(fs_info->mount_opt, opt); \ } -#ifdef CONFIG_BTRFS_DEBUG -static inline int -btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_fs_info *fs_info = block_group->fs_info; - - return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) && - block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || - (btrfs_test_opt(fs_info, FRAGMENT_DATA) && - block_group->flags & BTRFS_BLOCK_GROUP_DATA); -} -#endif - /* * Requests for changes that need to be done during transaction commit. * diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 97beb351a10c..589405eeb80f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -40,6 +40,7 @@ #include "compression.h" #include "tree-checker.h" #include "ref-verify.h" +#include "block-group.h" #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\ BTRFS_HEADER_FLAG_RELOC |\ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0bb095bda01b..f28697131f22 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -32,6 +32,7 @@ #include "space-info.h" #include "block-rsv.h" #include "delalloc-space.h" +#include "block-group.h" #undef SCRAMBLE_DELAYED_REFS diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 92cb06dd94d3..faaf57a7c289 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -20,6 +20,7 @@ #include "volumes.h" #include "space-info.h" #include "delalloc-space.h" +#include "block-group.h" #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) #define MAX_CACHE_BYTES_PER_GIG SZ_32K diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index f5dc115ebba0..48a03f5240f5 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c @@ -10,6 +10,7 @@ #include "locking.h" #include "free-space-tree.h" #include "transaction.h" +#include "block-group.h" static int __add_block_group_free_space(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h index 22b7602bde25..360d50e1cdea 100644 --- a/fs/btrfs/free-space-tree.h +++ b/fs/btrfs/free-space-tree.h @@ -6,6 +6,8 @@ #ifndef BTRFS_FREE_SPACE_TREE_H #define BTRFS_FREE_SPACE_TREE_H +struct btrfs_caching_control; + /* * The default size for new free space bitmap items. 
The last bitmap in a block * group may be truncated, and none of the free space tree code assumes that diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 20963b6567ae..612c25aac15c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -47,6 +47,7 @@ #include "props.h" #include "qgroup.h" #include "delalloc-space.h" +#include "block-group.h" struct btrfs_iget_args { struct btrfs_key *location; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ccac62d40dd2..b431f7877e88 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -45,6 +45,7 @@ #include "compression.h" #include "space-info.h" #include "delalloc-space.h" +#include "block-group.h" #ifdef CONFIG_64BIT /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index f8a3c1b0a15a..a960e33525ba 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -21,7 +21,7 @@ #include "backref.h" #include "extent_io.h" #include "qgroup.h" - +#include "block-group.h" /* TODO XXX FIXME * - subvol delete -> delete when ref goes to 0? delete limits also? diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index bb5bd49573b4..0b034c494355 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -14,6 +14,7 @@ #include "disk-io.h" #include "transaction.h" #include "dev-replace.h" +#include "block-group.h" #undef DEBUG diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 7ec632d4d960..2f0e25afa486 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -21,6 +21,7 @@ #include "qgroup.h" #include "print-tree.h" #include "delalloc-space.h" +#include "block-group.h" /* * backref_node, mapping_node and tree_block start with this diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 0c99cf9fb595..f7d4e03f4c5d 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -18,6 +18,7 @@ #include "check-integrity.h" #include "rcu-string.h" #include "raid56.h" +#include "block-group.h" /* * This is only the first step towards a full-features scrub. 
It reads all diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index ab7b9ec4c240..9dbb9c5f82b1 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -8,6 +8,7 @@ #include "ordered-data.h" #include "transaction.h" #include "math.h" +#include "block-group.h" u64 btrfs_space_info_used(struct btrfs_space_info *s_info, bool may_use_included) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 10bc7e6cca75..16c7af333d3a 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -44,6 +44,7 @@ #include "backref.h" #include "space-info.h" #include "tests/btrfs-tests.h" +#include "block-group.h" #include "qgroup.h" #define CREATE_TRACE_POINTS diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 9539f8143b7a..271e7e714920 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -17,6 +17,7 @@ #include "sysfs.h" #include "volumes.h" #include "space-info.h" +#include "block-group.h" static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj); static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj); diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 1e3ba4949399..b5e80563efaa 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -15,6 +15,7 @@ #include "../volumes.h" #include "../disk-io.h" #include "../qgroup.h" +#include "../block-group.h" static struct vfsmount *test_mnt = NULL; diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index af89f66f9e63..43ec7060fcd2 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -8,6 +8,7 @@ #include "../ctree.h" #include "../disk-io.h" #include "../free-space-cache.h" +#include "../block-group.h" #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index a90dad166971..bc92df977630 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c @@ -9,6 +9,7 @@ #include "../disk-io.h" #include "../free-space-tree.h" #include "../transaction.h" +#include "../block-group.h" struct free_space_extent { u64 start; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index e3adb714c04b..2e3f6778bfa3 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -19,6 +19,7 @@ #include "volumes.h" #include "dev-replace.h" #include "qgroup.h" +#include "block-group.h" #define BTRFS_ROOT_TRANS_TAG 0 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ef3e5b4f88be..cb9dcdffe434 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -29,6 +29,7 @@ #include "sysfs.h" #include "tree-checker.h" #include "space-info.h" +#include "block-group.h" const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { [BTRFS_RAID_RAID10] = { From 2e405ad842546a1a37aaa586d5140d071cb1f802 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:45 -0400 Subject: [PATCH 036/138] btrfs: migrate the block group lookup code Move these bits first as they are the easiest to move. Export two of the helpers so they can be moved all at once. 
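For orientation, the helper being moved answers two related questions keyed on a block group's start offset: "which group contains this byte number?" and "which is the first group at or after it?". The sketch below reproduces only that decision logic in userspace; it scans a plain array instead of walking the kernel rb-tree and leaves out the locking, refcounting and first_logical_byte handling, so treat it as an analogy rather than the moved code:

#include <stdint.h>
#include <stdio.h>

struct bg {
    uint64_t start;    /* plays the role of key.objectid */
    uint64_t len;      /* plays the role of key.offset   */
};

static const struct bg *bg_search(const struct bg *groups, int nr,
                                  uint64_t bytenr, int contains)
{
    const struct bg *ret = NULL;

    for (int i = 0; i < nr; i++) {
        uint64_t start = groups[i].start;
        uint64_t end = start + groups[i].len - 1;

        if (bytenr < start) {
            /* candidate for "at or after", never for "contains" */
            if (!contains && (!ret || start < ret->start))
                ret = &groups[i];
        } else if (bytenr > start) {
            if (contains && bytenr <= end)
                return &groups[i];
        } else {
            return &groups[i];    /* exact start matches both modes */
        }
    }
    return ret;
}

int main(void)
{
    const struct bg groups[] = {
        { .start = 0,       .len = 1024 * 1024 },
        { .start = 8 << 20, .len = 1 << 20 },
    };
    uint64_t bytenr = 2 << 20;    /* falls in the gap between the two groups */

    printf("contains -> %s\n",
           bg_search(groups, 2, bytenr, 1) ? "found" : "NULL");
    printf("at or after -> starts at %llu\n",
           (unsigned long long)bg_search(groups, 2, bytenr, 0)->start);
    return 0;
}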
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ minor style updates ] Signed-off-by: David Sterba --- fs/btrfs/Makefile | 2 +- fs/btrfs/block-group.c | 95 +++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 7 +++ fs/btrfs/ctree.h | 3 -- fs/btrfs/extent-tree.c | 96 +----------------------------------------- 5 files changed, 105 insertions(+), 98 deletions(-) create mode 100644 fs/btrfs/block-group.c diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 76a843198bcb..82200dbca5ac 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \ uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \ - block-rsv.o delalloc-space.o + block-rsv.o delalloc-space.o block-group.o btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c new file mode 100644 index 000000000000..ebe7b1c5c1e3 --- /dev/null +++ b/fs/btrfs/block-group.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "ctree.h" +#include "block-group.h" + +/* + * This will return the block group at or after bytenr if contains is 0, else + * it will return the block group that contains the bytenr + */ +static struct btrfs_block_group_cache *block_group_cache_tree_search( + struct btrfs_fs_info *info, u64 bytenr, int contains) +{ + struct btrfs_block_group_cache *cache, *ret = NULL; + struct rb_node *n; + u64 end, start; + + spin_lock(&info->block_group_cache_lock); + n = info->block_group_cache_tree.rb_node; + + while (n) { + cache = rb_entry(n, struct btrfs_block_group_cache, + cache_node); + end = cache->key.objectid + cache->key.offset - 1; + start = cache->key.objectid; + + if (bytenr < start) { + if (!contains && (!ret || start < ret->key.objectid)) + ret = cache; + n = n->rb_left; + } else if (bytenr > start) { + if (contains && bytenr <= end) { + ret = cache; + break; + } + n = n->rb_right; + } else { + ret = cache; + break; + } + } + if (ret) { + btrfs_get_block_group(ret); + if (bytenr == 0 && info->first_logical_byte > ret->key.objectid) + info->first_logical_byte = ret->key.objectid; + } + spin_unlock(&info->block_group_cache_lock); + + return ret; +} + +/* + * Return the block group that starts at or after bytenr + */ +struct btrfs_block_group_cache *btrfs_lookup_first_block_group( + struct btrfs_fs_info *info, u64 bytenr) +{ + return block_group_cache_tree_search(info, bytenr, 0); +} + +/* + * Return the block group that contains the given bytenr + */ +struct btrfs_block_group_cache *btrfs_lookup_block_group( + struct btrfs_fs_info *info, u64 bytenr) +{ + return block_group_cache_tree_search(info, bytenr, 1); +} + +struct btrfs_block_group_cache *btrfs_next_block_group( + struct btrfs_block_group_cache *cache) +{ + struct btrfs_fs_info *fs_info = cache->fs_info; + struct rb_node *node; + + spin_lock(&fs_info->block_group_cache_lock); + + /* If our block group was removed, we need a full search. 
*/ + if (RB_EMPTY_NODE(&cache->cache_node)) { + const u64 next_bytenr = cache->key.objectid + cache->key.offset; + + spin_unlock(&fs_info->block_group_cache_lock); + btrfs_put_block_group(cache); + cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache; + } + node = rb_next(&cache->cache_node); + btrfs_put_block_group(cache); + if (node) { + cache = rb_entry(node, struct btrfs_block_group_cache, + cache_node); + btrfs_get_block_group(cache); + } else + cache = NULL; + spin_unlock(&fs_info->block_group_cache_lock); + return cache; +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 054745007519..87bac0d5ad69 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -151,4 +151,11 @@ static inline int btrfs_should_fragment_free_space( } #endif +struct btrfs_block_group_cache *btrfs_lookup_first_block_group( + struct btrfs_fs_info *info, u64 bytenr); +struct btrfs_block_group_cache *btrfs_lookup_block_group( + struct btrfs_fs_info *info, u64 bytenr); +struct btrfs_block_group_cache *btrfs_next_block_group( + struct btrfs_block_group_cache *cache); + #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e95fdd1d9dd2..49ac72c3d0cd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2496,9 +2496,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, u64 bytenr); -struct btrfs_block_group_cache *btrfs_lookup_block_group( - struct btrfs_fs_info *info, - u64 bytenr); void btrfs_get_block_group(struct btrfs_block_group_cache *cache); void btrfs_put_block_group(struct btrfs_block_group_cache *cache); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f28697131f22..a454945227ca 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -133,52 +133,6 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, return 0; } -/* - * This will return the block group at or after bytenr if contains is 0, else - * it will return the block group that contains the bytenr - */ -static struct btrfs_block_group_cache * -block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, - int contains) -{ - struct btrfs_block_group_cache *cache, *ret = NULL; - struct rb_node *n; - u64 end, start; - - spin_lock(&info->block_group_cache_lock); - n = info->block_group_cache_tree.rb_node; - - while (n) { - cache = rb_entry(n, struct btrfs_block_group_cache, - cache_node); - end = cache->key.objectid + cache->key.offset - 1; - start = cache->key.objectid; - - if (bytenr < start) { - if (!contains && (!ret || start < ret->key.objectid)) - ret = cache; - n = n->rb_left; - } else if (bytenr > start) { - if (contains && bytenr <= end) { - ret = cache; - break; - } - n = n->rb_right; - } else { - ret = cache; - break; - } - } - if (ret) { - btrfs_get_block_group(ret); - if (bytenr == 0 && info->first_logical_byte > ret->key.objectid) - info->first_logical_byte = ret->key.objectid; - } - spin_unlock(&info->block_group_cache_lock); - - return ret; -} - static int add_excluded_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes) { @@ -673,24 +627,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, return ret; } -/* - * return the block group that starts at or after bytenr - */ -static struct btrfs_block_group_cache * 
-btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) -{ - return block_group_cache_tree_search(info, bytenr, 0); -} - -/* - * return the block group that contains the given bytenr - */ -struct btrfs_block_group_cache *btrfs_lookup_block_group( - struct btrfs_fs_info *info, - u64 bytenr) -{ - return block_group_cache_tree_search(info, bytenr, 1); -} static u64 generic_ref_to_space_flags(struct btrfs_ref *ref) { @@ -3146,34 +3082,6 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, } -static struct btrfs_block_group_cache *next_block_group( - struct btrfs_block_group_cache *cache) -{ - struct btrfs_fs_info *fs_info = cache->fs_info; - struct rb_node *node; - - spin_lock(&fs_info->block_group_cache_lock); - - /* If our block group was removed, we need a full search. */ - if (RB_EMPTY_NODE(&cache->cache_node)) { - const u64 next_bytenr = cache->key.objectid + cache->key.offset; - - spin_unlock(&fs_info->block_group_cache_lock); - btrfs_put_block_group(cache); - cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache; - } - node = rb_next(&cache->cache_node); - btrfs_put_block_group(cache); - if (node) { - cache = rb_entry(node, struct btrfs_block_group_cache, - cache_node); - btrfs_get_block_group(cache); - } else - cache = NULL; - spin_unlock(&fs_info->block_group_cache_lock); - return cache; -} - static int cache_save_setup(struct btrfs_block_group_cache *block_group, struct btrfs_trans_handle *trans, struct btrfs_path *path) @@ -7651,7 +7559,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) if (block_group->iref) break; spin_unlock(&block_group->lock); - block_group = next_block_group(block_group); + block_group = btrfs_next_block_group(block_group); } if (!block_group) { if (last == 0) @@ -8872,7 +8780,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) return -EINVAL; cache = btrfs_lookup_first_block_group(fs_info, range->start); - for (; cache; cache = next_block_group(cache)) { + for (; cache; cache = btrfs_next_block_group(cache)) { if (cache->key.objectid >= range_end) { btrfs_put_block_group(cache); break; From 3cad128400c2445d9140c0f5720018e075ef66c6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:46 -0400 Subject: [PATCH 037/138] btrfs: migrate the block group ref counting stuff Another easy set to move over to block-group.c. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 25 +++++++++++++++++++++++++ fs/btrfs/block-group.h | 2 ++ fs/btrfs/ctree.h | 3 --- fs/btrfs/extent-tree.c | 25 ------------------------- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index ebe7b1c5c1e3..4328196a4d44 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -3,6 +3,31 @@ #include "ctree.h" #include "block-group.h" +void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +{ + atomic_inc(&cache->count); +} + +void btrfs_put_block_group(struct btrfs_block_group_cache *cache) +{ + if (atomic_dec_and_test(&cache->count)) { + WARN_ON(cache->pinned > 0); + WARN_ON(cache->reserved > 0); + + /* + * If not empty, someone is still holding mutex of + * full_stripe_lock, which can only be released by caller. + * And it will definitely cause use-after-free when caller + * tries to release full stripe lock. + * + * No better way to resolve, but only to warn. 
+ */ + WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root)); + kfree(cache->free_space_ctl); + kfree(cache); + } +} + /* * This will return the block group at or after bytenr if contains is 0, else * it will return the block group that contains the bytenr diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 87bac0d5ad69..f7c7d1ac6d9b 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -157,5 +157,7 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); struct btrfs_block_group_cache *btrfs_next_block_group( struct btrfs_block_group_cache *cache); +void btrfs_get_block_group(struct btrfs_block_group_cache *cache); +void btrfs_put_block_group(struct btrfs_block_group_cache *cache); #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 49ac72c3d0cd..ae8f39c3dcd2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2479,7 +2479,6 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); -void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, @@ -2496,8 +2495,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, int btrfs_exclude_logged_extents(struct extent_buffer *eb); int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, u64 bytenr); -void btrfs_get_block_group(struct btrfs_block_group_cache *cache); -void btrfs_put_block_group(struct btrfs_block_group_cache *cache); struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 parent, u64 root_objectid, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a454945227ca..dc1fb9286fee 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -68,31 +68,6 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } -void btrfs_get_block_group(struct btrfs_block_group_cache *cache) -{ - atomic_inc(&cache->count); -} - -void btrfs_put_block_group(struct btrfs_block_group_cache *cache) -{ - if (atomic_dec_and_test(&cache->count)) { - WARN_ON(cache->pinned > 0); - WARN_ON(cache->reserved > 0); - - /* - * If not empty, someone is still holding mutex of - * full_stripe_lock, which can only be released by caller. - * And it will definitely cause use-after-free when caller - * tries to release full stripe lock. - * - * No better way to resolve, but only to warn. - */ - WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root)); - kfree(cache->free_space_ctl); - kfree(cache); - } -} - /* * this adds the block group to the fs_info rb tree for the block group * cache From 3eeb3226a8891544ea4a9baf27ba3d73e8a42991 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:47 -0400 Subject: [PATCH 038/138] btrfs: migrate nocow and reservation helpers These are relatively straightforward as well. 
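The helpers moved here all share one idiom: a per-block-group counter is bumped while the operation is still allowed, the task that drops it to zero wakes any waiter, and waiters block until the counter reads zero. A rough userspace analogue with pthreads, sketching the pattern only (the kernel code uses a spinlock, atomic_dec_and_test() and wait_var_event() instead):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  zero = PTHREAD_COND_INITIALIZER;
static int writers;
static bool read_only;

static bool inc_writers(void)        /* ~ btrfs_inc_nocow_writers() */
{
    bool ok;

    pthread_mutex_lock(&lock);
    ok = !read_only;                 /* refuse once the group went read-only */
    if (ok)
        writers++;
    pthread_mutex_unlock(&lock);
    return ok;
}

static void dec_writers(void)        /* ~ btrfs_dec_nocow_writers() */
{
    pthread_mutex_lock(&lock);
    if (--writers == 0)
        pthread_cond_broadcast(&zero);    /* ~ wake_up_var() on the last writer */
    pthread_mutex_unlock(&lock);
}

static void wait_writers(void)       /* ~ btrfs_wait_nocow_writers() */
{
    pthread_mutex_lock(&lock);
    while (writers)                  /* ~ wait_var_event(..., !atomic_read(...)) */
        pthread_cond_wait(&zero, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    if (inc_writers()) {
        /* ... do the nocow write ... */
        dec_writers();
    }
    wait_writers();
    printf("no writers left\n");
    return 0;
}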
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 82 ++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 6 ++++ fs/btrfs/ctree.h | 6 ---- fs/btrfs/extent-tree.c | 82 ------------------------------------------ 4 files changed, 88 insertions(+), 88 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 4328196a4d44..1f3afa0b42ba 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2,6 +2,7 @@ #include "ctree.h" #include "block-group.h" +#include "space-info.h" void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { @@ -118,3 +119,84 @@ struct btrfs_block_group_cache *btrfs_next_block_group( spin_unlock(&fs_info->block_group_cache_lock); return cache; } + +bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) +{ + struct btrfs_block_group_cache *bg; + bool ret = true; + + bg = btrfs_lookup_block_group(fs_info, bytenr); + if (!bg) + return false; + + spin_lock(&bg->lock); + if (bg->ro) + ret = false; + else + atomic_inc(&bg->nocow_writers); + spin_unlock(&bg->lock); + + /* No put on block group, done by btrfs_dec_nocow_writers */ + if (!ret) + btrfs_put_block_group(bg); + + return ret; +} + +void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) +{ + struct btrfs_block_group_cache *bg; + + bg = btrfs_lookup_block_group(fs_info, bytenr); + ASSERT(bg); + if (atomic_dec_and_test(&bg->nocow_writers)) + wake_up_var(&bg->nocow_writers); + /* + * Once for our lookup and once for the lookup done by a previous call + * to btrfs_inc_nocow_writers() + */ + btrfs_put_block_group(bg); + btrfs_put_block_group(bg); +} + +void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) +{ + wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); +} + +void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, + const u64 start) +{ + struct btrfs_block_group_cache *bg; + + bg = btrfs_lookup_block_group(fs_info, start); + ASSERT(bg); + if (atomic_dec_and_test(&bg->reservations)) + wake_up_var(&bg->reservations); + btrfs_put_block_group(bg); +} + +void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) +{ + struct btrfs_space_info *space_info = bg->space_info; + + ASSERT(bg->ro); + + if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) + return; + + /* + * Our block group is read only but before we set it to read only, + * some task might have had allocated an extent from it already, but it + * has not yet created a respective ordered extent (and added it to a + * root's list of ordered extents). + * Therefore wait for any task currently allocating extents, since the + * block group's reservations counter is incremented while a read lock + * on the groups' semaphore is held and decremented after releasing + * the read access on that semaphore and creating the ordered extent. 
+ */ + down_write(&space_info->groups_sem); + up_write(&space_info->groups_sem); + + wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index f7c7d1ac6d9b..73147cce7952 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -159,5 +159,11 @@ struct btrfs_block_group_cache *btrfs_next_block_group( struct btrfs_block_group_cache *cache); void btrfs_get_block_group(struct btrfs_block_group_cache *cache); void btrfs_put_block_group(struct btrfs_block_group_cache *cache); +void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, + const u64 start); +void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); +bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); +void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); +void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ae8f39c3dcd2..04785e912e52 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2473,12 +2473,6 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; } -void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, - const u64 start); -void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); -bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); -void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index dc1fb9286fee..86ffbc958bb5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3561,51 +3561,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) return readonly; } -bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) -{ - struct btrfs_block_group_cache *bg; - bool ret = true; - - bg = btrfs_lookup_block_group(fs_info, bytenr); - if (!bg) - return false; - - spin_lock(&bg->lock); - if (bg->ro) - ret = false; - else - atomic_inc(&bg->nocow_writers); - spin_unlock(&bg->lock); - - /* no put on block group, done by btrfs_dec_nocow_writers */ - if (!ret) - btrfs_put_block_group(bg); - - return ret; - -} - -void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) -{ - struct btrfs_block_group_cache *bg; - - bg = btrfs_lookup_block_group(fs_info, bytenr); - ASSERT(bg); - if (atomic_dec_and_test(&bg->nocow_writers)) - wake_up_var(&bg->nocow_writers); - /* - * Once for our lookup and once for the lookup done by a previous call - * to btrfs_inc_nocow_writers() - */ - btrfs_put_block_group(bg); - btrfs_put_block_group(bg); -} - -void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) -{ - wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); -} - static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) { u64 extra_flags = chunk_to_extended(flags) & @@ -4277,43 +4232,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg) atomic_inc(&bg->reservations); } -void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, - const u64 start) -{ - struct btrfs_block_group_cache *bg; - - bg = 
btrfs_lookup_block_group(fs_info, start); - ASSERT(bg); - if (atomic_dec_and_test(&bg->reservations)) - wake_up_var(&bg->reservations); - btrfs_put_block_group(bg); -} - -void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) -{ - struct btrfs_space_info *space_info = bg->space_info; - - ASSERT(bg->ro); - - if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) - return; - - /* - * Our block group is read only but before we set it to read only, - * some task might have had allocated an extent from it already, but it - * has not yet created a respective ordered extent (and added it to a - * root's list of ordered extents). - * Therefore wait for any task currently allocating extents, since the - * block group's reservations counter is incremented while a read lock - * on the groups' semaphore is held and decremented after releasing - * the read access on that semaphore and creating the ordered extent. - */ - down_write(&space_info->groups_sem); - up_write(&space_info->groups_sem); - - wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); -} - /** * btrfs_add_reserved_bytes - update the block_group and space info counters * @cache: The cache we are manipulating From 676f1f759fa3f65f8987295c99615c1090e074ea Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:48 -0400 Subject: [PATCH 039/138] btrfs: export the block group caching helpers This will make it so we can move them easily. Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ coding style updates ] Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 13 +++++++++ fs/btrfs/extent-tree.c | 61 ++++++++++++++++++------------------------ 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 73147cce7952..7069bcfc5e8f 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -165,5 +165,18 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, + u64 num_bytes); +int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache); +int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, + int load_cache_only); + +static inline int btrfs_block_group_cache_done( + struct btrfs_block_group_cache *cache) +{ + smp_mb(); + return cache->cached == BTRFS_CACHE_FINISHED || + cache->cached == BTRFS_CACHE_ERROR; +} #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 86ffbc958bb5..721b2bcb9e95 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -55,14 +55,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key); -static noinline int -block_group_cache_done(struct btrfs_block_group_cache *cache) -{ - smp_mb(); - return cache->cached == BTRFS_CACHE_FINISHED || - cache->cached == BTRFS_CACHE_ERROR; -} - static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) { return (cache->flags & bits) == bits; @@ -235,9 +227,10 @@ static void fragment_free_space(struct btrfs_block_group_cache *block_group) #endif /* - * this is only called by cache_block_group, since we could have freed extents - * we need to check the 
pinned_extents for any extents that can't be used yet - * since their free space will be released as soon as the transaction commits. + * This is only called by btrfs_cache_block_group, since we could have freed + * extents we need to check the pinned_extents for any extents that can't be + * used yet since their free space will be released as soon as the transaction + * commits. */ u64 add_new_free_space(struct btrfs_block_group_cache *block_group, u64 start, u64 end) @@ -466,8 +459,8 @@ static noinline void caching_thread(struct btrfs_work *work) btrfs_put_block_group(block_group); } -static int cache_block_group(struct btrfs_block_group_cache *cache, - int load_cache_only) +int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, + int load_cache_only) { DEFINE_WAIT(wait); struct btrfs_fs_info *fs_info = cache->fs_info; @@ -3981,7 +3974,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, * space back to the block group, otherwise we will leak space. */ if (!alloc && cache->cached == BTRFS_CACHE_NO) - cache_block_group(cache, 1); + btrfs_cache_block_group(cache, 1); byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); @@ -4138,7 +4131,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, * to one because the slow code to read in the free extents does check * the pinned extents. */ - cache_block_group(cache, 1); + btrfs_cache_block_group(cache, 1); pin_down_extent(cache, bytenr, num_bytes, 0); @@ -4159,12 +4152,12 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, if (!block_group) return -EINVAL; - cache_block_group(block_group, 0); + btrfs_cache_block_group(block_group, 0); caching_ctl = get_caching_control(block_group); if (!caching_ctl) { /* Logic error */ - BUG_ON(!block_group_cache_done(block_group)); + BUG_ON(!btrfs_block_group_cache_done(block_group)); ret = btrfs_remove_free_space(block_group, start, num_bytes); } else { mutex_lock(&caching_ctl->mutex); @@ -4308,7 +4301,7 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) list_for_each_entry_safe(caching_ctl, next, &fs_info->caching_block_groups, list) { cache = caching_ctl->block_group; - if (block_group_cache_done(cache)) { + if (btrfs_block_group_cache_done(cache)) { cache->last_byte_to_unpin = (u64)-1; list_del_init(&caching_ctl->list); put_caching_control(caching_ctl); @@ -4937,9 +4930,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using * any of the information in this block group. 
*/ -static noinline void -wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, - u64 num_bytes) +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, + u64 num_bytes) { struct btrfs_caching_control *caching_ctl; @@ -4947,14 +4939,13 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, if (!caching_ctl) return; - wait_event(caching_ctl->wait, block_group_cache_done(cache) || + wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || (cache->free_space_ctl->free_space >= num_bytes)); put_caching_control(caching_ctl); } -static noinline int -wait_block_group_cache_done(struct btrfs_block_group_cache *cache) +int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) { struct btrfs_caching_control *caching_ctl; int ret = 0; @@ -4963,7 +4954,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache) if (!caching_ctl) return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; - wait_event(caching_ctl->wait, block_group_cache_done(cache)); + wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); if (cache->cached == BTRFS_CACHE_ERROR) ret = -EIO; put_caching_control(caching_ctl); @@ -5189,7 +5180,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg, spin_unlock(&last_ptr->refill_lock); ffe_ctl->retry_clustered = true; - wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + + btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + ffe_ctl->empty_cluster + ffe_ctl->empty_size); return -EAGAIN; } @@ -5256,8 +5247,8 @@ static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg, */ if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT) { - wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + - ffe_ctl->empty_size); + btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes + + ffe_ctl->empty_size); ffe_ctl->retry_unclustered = true; return -EAGAIN; } else if (!offset) { @@ -5564,10 +5555,10 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info, } have_block_group: - ffe_ctl.cached = block_group_cache_done(block_group); + ffe_ctl.cached = btrfs_block_group_cache_done(block_group); if (unlikely(!ffe_ctl.cached)) { ffe_ctl.have_caching_bg = true; - ret = cache_block_group(block_group, 0); + ret = btrfs_cache_block_group(block_group, 0); BUG_ON(ret < 0); ret = 0; } @@ -7447,7 +7438,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) block_group = btrfs_lookup_first_block_group(info, last); while (block_group) { - wait_block_group_cache_done(block_group); + btrfs_wait_block_group_cache_done(block_group); spin_lock(&block_group->lock); if (block_group->iref) break; @@ -8211,7 +8202,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (block_group->has_caching_ctl) caching_ctl = get_caching_control(block_group); if (block_group->cached == BTRFS_CACHE_STARTED) - wait_block_group_cache_done(block_group); + btrfs_wait_block_group_cache_done(block_group); if (block_group->has_caching_ctl) { down_write(&fs_info->commit_root_sem); if (!caching_ctl) { @@ -8683,14 +8674,14 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) end = min(range_end, cache->key.objectid + cache->key.offset); if (end - start >= range->minlen) { - if (!block_group_cache_done(cache)) { - ret = cache_block_group(cache, 0); + if (!btrfs_block_group_cache_done(cache)) { + ret = btrfs_cache_block_group(cache, 0); if (ret) { bg_failed++; bg_ret = 
ret; continue; } - ret = wait_block_group_cache_done(cache); + ret = btrfs_wait_block_group_cache_done(cache); if (ret) { bg_failed++; bg_ret = ret; From 6f410d1b3dbf9213ee89c569e8213511319bbd90 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:49 -0400 Subject: [PATCH 040/138] btrfs: export the excluded extents helpers We'll need this to move the caching stuff around. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 3 +++ fs/btrfs/extent-tree.c | 36 +++++++++++++++++++----------------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 04785e912e52..b95d7472b2a9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2473,6 +2473,9 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; } +int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, + u64 start, u64 num_bytes); +void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count); void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 721b2bcb9e95..2d92f103fa2b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -100,8 +100,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, return 0; } -static int add_excluded_extent(struct btrfs_fs_info *fs_info, - u64 start, u64 num_bytes) +int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, + u64 start, u64 num_bytes) { u64 end = start + num_bytes - 1; set_extent_bits(&fs_info->freed_extents[0], @@ -111,7 +111,7 @@ static int add_excluded_extent(struct btrfs_fs_info *fs_info, return 0; } -static void free_excluded_extents(struct btrfs_block_group_cache *cache) +void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; u64 start, end; @@ -136,8 +136,8 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; cache->bytes_super += stripe_len; - ret = add_excluded_extent(fs_info, cache->key.objectid, - stripe_len); + ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid, + stripe_len); if (ret) return ret; } @@ -170,7 +170,7 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) } cache->bytes_super += len; - ret = add_excluded_extent(fs_info, start, len); + ret = btrfs_add_excluded_extent(fs_info, start, len); if (ret) { kfree(logical); return ret; @@ -450,7 +450,7 @@ static noinline void caching_thread(struct btrfs_work *work) caching_ctl->progress = (u64)-1; up_read(&fs_info->commit_root_sem); - free_excluded_extents(block_group); + btrfs_free_excluded_extents(block_group); mutex_unlock(&caching_ctl->mutex); wake_up(&caching_ctl->wait); @@ -558,7 +558,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, wake_up(&caching_ctl->wait); if (ret == 1) { put_caching_control(caching_ctl); - free_excluded_extents(cache); + btrfs_free_excluded_extents(cache); return 0; } } else { @@ -4163,7 +4163,8 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, mutex_lock(&caching_ctl->mutex); if (start >= caching_ctl->progress) { - ret = add_excluded_extent(fs_info, start, num_bytes); + ret = btrfs_add_excluded_extent(fs_info, 
start, + num_bytes); } else if (start + num_bytes <= caching_ctl->progress) { ret = btrfs_remove_free_space(block_group, start, num_bytes); @@ -4177,7 +4178,8 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, num_bytes = (start + num_bytes) - caching_ctl->progress; start = caching_ctl->progress; - ret = add_excluded_extent(fs_info, start, num_bytes); + ret = btrfs_add_excluded_extent(fs_info, start, + num_bytes); } out_lock: mutex_unlock(&caching_ctl->mutex); @@ -7513,7 +7515,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) */ if (block_group->cached == BTRFS_CACHE_NO || block_group->cached == BTRFS_CACHE_ERROR) - free_excluded_extents(block_group); + btrfs_free_excluded_extents(block_group); btrfs_remove_free_space_cache(block_group); ASSERT(block_group->cached != BTRFS_CACHE_STARTED); @@ -7806,7 +7808,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) * We may have excluded something, so call this just in * case. */ - free_excluded_extents(cache); + btrfs_free_excluded_extents(cache); btrfs_put_block_group(cache); goto error; } @@ -7821,14 +7823,14 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) if (found_key.offset == btrfs_block_group_used(&cache->item)) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; - free_excluded_extents(cache); + btrfs_free_excluded_extents(cache); } else if (btrfs_block_group_used(&cache->item) == 0) { cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, found_key.objectid, found_key.objectid + found_key.offset); - free_excluded_extents(cache); + btrfs_free_excluded_extents(cache); } ret = btrfs_add_block_group_cache(info, cache); @@ -7952,14 +7954,14 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, * We may have excluded something, so call this just in * case. */ - free_excluded_extents(cache); + btrfs_free_excluded_extents(cache); btrfs_put_block_group(cache); return ret; } add_new_free_space(cache, chunk_offset, chunk_offset + size); - free_excluded_extents(cache); + btrfs_free_excluded_extents(cache); #ifdef CONFIG_BTRFS_DEBUG if (btrfs_should_fragment_free_space(cache)) { @@ -8075,7 +8077,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, * Free the reserved super bytes from this block group before * remove it. */ - free_excluded_extents(block_group); + btrfs_free_excluded_extents(block_group); btrfs_free_ref_tree_range(fs_info, block_group->key.objectid, block_group->key.offset); From e3cb339fa5ca57cfa5429bf3da548331dce93038 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:50 -0400 Subject: [PATCH 041/138] btrfs: export the caching control helpers Man a lot of people use this stuff. 
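As an illustration only (not part of this series), the caller pattern these exports enable from other files mirrors the existing wait helper; the function name example_wait_for_caching() below is hypothetical:

static int example_wait_for_caching(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	/* Take a reference on the caching control, if caching is running. */
	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	/* Wait until the block group has finished (or failed) caching. */
	wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;

	/* Drop our reference on the caching control. */
	btrfs_put_caching_control(caching_ctl);
	return ret;
}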
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 3 +++ fs/btrfs/extent-tree.c | 36 ++++++++++++++++++------------------ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 7069bcfc5e8f..3f8b6ffefb8a 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -170,6 +170,9 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache); int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, int load_cache_only); +void btrfs_put_caching_control(struct btrfs_caching_control *ctl); +struct btrfs_caching_control *btrfs_get_caching_control( + struct btrfs_block_group_cache *cache); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2d92f103fa2b..dab8e90237cd 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -182,8 +182,8 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) return 0; } -static struct btrfs_caching_control * -get_caching_control(struct btrfs_block_group_cache *cache) +struct btrfs_caching_control *btrfs_get_caching_control( + struct btrfs_block_group_cache *cache) { struct btrfs_caching_control *ctl; @@ -199,7 +199,7 @@ get_caching_control(struct btrfs_block_group_cache *cache) return ctl; } -static void put_caching_control(struct btrfs_caching_control *ctl) +void btrfs_put_caching_control(struct btrfs_caching_control *ctl) { if (refcount_dec_and_test(&ctl->count)) kfree(ctl); @@ -455,7 +455,7 @@ static noinline void caching_thread(struct btrfs_work *work) wake_up(&caching_ctl->wait); - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); btrfs_put_block_group(block_group); } @@ -504,7 +504,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, schedule(); finish_wait(&ctl->wait, &wait); - put_caching_control(ctl); + btrfs_put_caching_control(ctl); spin_lock(&cache->lock); } @@ -557,7 +557,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, wake_up(&caching_ctl->wait); if (ret == 1) { - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); btrfs_free_excluded_extents(cache); return 0; } @@ -579,7 +579,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, } if (load_cache_only) { - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); return 0; } @@ -4153,7 +4153,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, return -EINVAL; btrfs_cache_block_group(block_group, 0); - caching_ctl = get_caching_control(block_group); + caching_ctl = btrfs_get_caching_control(block_group); if (!caching_ctl) { /* Logic error */ @@ -4183,7 +4183,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, } out_lock: mutex_unlock(&caching_ctl->mutex); - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); } btrfs_put_block_group(block_group); return ret; @@ -4306,7 +4306,7 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) if (btrfs_block_group_cache_done(cache)) { cache->last_byte_to_unpin = (u64)-1; list_del_init(&caching_ctl->list); - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); } else { cache->last_byte_to_unpin = caching_ctl->progress; } @@ -4937,14 +4937,14 @@ void 
btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache { struct btrfs_caching_control *caching_ctl; - caching_ctl = get_caching_control(cache); + caching_ctl = btrfs_get_caching_control(cache); if (!caching_ctl) return; wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || (cache->free_space_ctl->free_space >= num_bytes)); - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); } int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) @@ -4952,14 +4952,14 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) struct btrfs_caching_control *caching_ctl; int ret = 0; - caching_ctl = get_caching_control(cache); + caching_ctl = btrfs_get_caching_control(cache); if (!caching_ctl) return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); if (cache->cached == BTRFS_CACHE_ERROR) ret = -EIO; - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); return ret; } @@ -7482,7 +7482,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) caching_ctl = list_entry(info->caching_block_groups.next, struct btrfs_caching_control, list); list_del(&caching_ctl->list); - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); } up_write(&info->commit_root_sem); @@ -8202,7 +8202,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, } if (block_group->has_caching_ctl) - caching_ctl = get_caching_control(block_group); + caching_ctl = btrfs_get_caching_control(block_group); if (block_group->cached == BTRFS_CACHE_STARTED) btrfs_wait_block_group_cache_done(block_group); if (block_group->has_caching_ctl) { @@ -8223,8 +8223,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, up_write(&fs_info->commit_root_sem); if (caching_ctl) { /* Once for the caching bgs list and once for us. */ - put_caching_control(caching_ctl); - put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); } } From caa4efafcf037d3881436f2b7ad818cdacebb8a1 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:51 -0400 Subject: [PATCH 042/138] btrfs: temporarily export fragment_free_space This is used in caching and reading block groups, so export it while we move these chunks independently. 
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 1 + fs/btrfs/extent-tree.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 3f8b6ffefb8a..ef101fd52158 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -149,6 +149,7 @@ static inline int btrfs_should_fragment_free_space( (btrfs_test_opt(fs_info, FRAGMENT_DATA) && block_group->flags & BTRFS_BLOCK_GROUP_DATA); } +void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group); #endif struct btrfs_block_group_cache *btrfs_lookup_first_block_group( diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index dab8e90237cd..6cae152a4812 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -206,7 +206,7 @@ void btrfs_put_caching_control(struct btrfs_caching_control *ctl) } #ifdef CONFIG_BTRFS_DEBUG -static void fragment_free_space(struct btrfs_block_group_cache *block_group) +void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; u64 start = block_group->key.objectid; @@ -443,7 +443,7 @@ static noinline void caching_thread(struct btrfs_work *work) block_group->space_info->bytes_used += bytes_used >> 1; spin_unlock(&block_group->lock); spin_unlock(&block_group->space_info->lock); - fragment_free_space(block_group); + btrfs_fragment_free_space(block_group); } #endif @@ -550,7 +550,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, cache->space_info->bytes_used += bytes_used >> 1; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - fragment_free_space(cache); + btrfs_fragment_free_space(cache); } #endif mutex_unlock(&caching_ctl->mutex); @@ -7968,7 +7968,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 new_bytes_used = size - bytes_used; bytes_used += new_bytes_used >> 1; - fragment_free_space(cache); + btrfs_fragment_free_space(cache); } #endif /* From 6a9fb468f1152d6254f49fee6ac28c3cfa3367e5 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:52 -0400 Subject: [PATCH 043/138] btrfs: make caching_thread use btrfs_find_next_key extent-tree.c has a find_next_key that just walks up the path to find the next key, but it is used for both the caching stuff and the snapshot delete stuff. The snapshot deletion stuff is special so it can't really use btrfs_find_next_key, but the caching thread stuff can. We just need to fix btrfs_find_next_key to deal with ->skip_locking and then it works exactly the same as the private find_next_key helper. 
Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 ++-- fs/btrfs/extent-tree.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 9d1d0a926cb0..f8dc582db690 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -5623,7 +5623,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, int slot; struct extent_buffer *c; - WARN_ON(!path->keep_locks); + WARN_ON(!path->keep_locks && !path->skip_locking); while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; @@ -5639,7 +5639,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, !path->nodes[level + 1]) return 1; - if (path->locks[level + 1]) { + if (path->locks[level + 1] || path->skip_locking) { level++; continue; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6cae152a4812..3348f9a4e15c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -331,7 +331,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) if (path->slots[0] < nritems) { btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); } else { - ret = find_next_key(path, 0, &key); + ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); if (ret) break; From 6e369febbc534596b5e366d46f10b65bb4caa544 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 13 Jun 2019 17:23:02 +0200 Subject: [PATCH 044/138] btrfs: sysfs: add debugging exports Add 'debug' directories to global sysfs and per-filesystem. This will replace the debugfs directory. The sysfs location is simpler and builds on top of the existing file hierarchy so there will hopefully be no more questions about the sample debugfs file. The directory is called 'debug' and only present under CONFIG_BTRFS_DEBUG so this will not affect productions builds. 
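As a hedged sketch only (not part of this patch), a future debug knob would be wired up by declaring a kobj_attribute and listing it in the currently empty btrfs_debug_feature_attrs[] array; the attribute name "example" and its show callback below are hypothetical:

static ssize_t btrfs_debug_example_show(struct kobject *kobj,
					struct kobj_attribute *a, char *buf)
{
	/* Hypothetical read-only value, visible as /sys/fs/btrfs/debug/example */
	return snprintf(buf, PAGE_SIZE, "0\n");
}

static struct kobj_attribute btrfs_debug_example_attr =
	__ATTR(example, 0444, btrfs_debug_example_show, NULL);

static struct attribute *btrfs_debug_feature_attrs[] = {
	&btrfs_debug_example_attr.attr,
	NULL
};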
Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 271e7e714920..315204557bfc 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -248,6 +248,25 @@ static const struct attribute_group btrfs_static_feature_attr_group = { .attrs = btrfs_supported_static_feature_attrs, }; +#ifdef CONFIG_BTRFS_DEBUG + +/* + * Runtime debugging exported via sysfs + * + * /sys/fs/btrfs/debug - applies to module or all filesystems + * /sys/fs/btrfs/UUID - applies only to the given filesystem + */ +static struct attribute *btrfs_debug_feature_attrs[] = { + NULL +}; + +static const struct attribute_group btrfs_debug_feature_attr_group = { + .name = "debug", + .attrs = btrfs_debug_feature_attrs, +}; + +#endif + static ssize_t btrfs_show_u64(u64 *value_ptr, spinlock_t *lock, char *buf) { u64 val; @@ -860,6 +879,13 @@ int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) if (error) goto failure; +#ifdef CONFIG_BTRFS_DEBUG + error = sysfs_create_group(fsid_kobj, + &btrfs_debug_feature_attr_group); + if (error) + goto failure; +#endif + error = addrm_unknown_feature_attrs(fs_info, true); if (error) goto failure; @@ -952,6 +978,12 @@ int __init btrfs_init_sysfs(void) if (ret) goto out_remove_group; +#ifdef CONFIG_BTRFS_DEBUG + ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); + if (ret) + goto out2; +#endif + return 0; out_remove_group: From b33151e7b3a3d057f975852164581d89b2e6716a Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 13 Jun 2019 17:27:36 +0200 Subject: [PATCH 045/138] btrfs: delete debugfs code Replaced by the sysfs exports that provide a more fine grained interface for filesystem debugging. Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 30 ------------------------------ fs/btrfs/sysfs.h | 5 ----- 2 files changed, 35 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 315204557bfc..115499cd6b79 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -9,7 +9,6 @@ #include #include #include -#include #include "ctree.h" #include "disk-io.h" @@ -829,12 +828,6 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; -/* /sys/kernel/debug/btrfs */ -static struct dentry *btrfs_debugfs_root_dentry; - -/* Debugging tunables and exported data */ -u64 btrfs_debugfs_test; - /* * Can be called by the device discovery thread. * And parent can be specified for seed device @@ -940,25 +933,6 @@ void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info, ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group); } -static void btrfs_init_debugfs(void) -{ -#ifdef CONFIG_DEBUG_FS - btrfs_debugfs_root_dentry = debugfs_create_dir("btrfs", NULL); - - /* - * Example code, how to export data through debugfs. 
- * - * file: /sys/kernel/debug/btrfs/test - * contents of: btrfs_debugfs_test - */ -#ifdef CONFIG_BTRFS_DEBUG - debugfs_create_u64("test", S_IRUGO | S_IWUSR, btrfs_debugfs_root_dentry, - &btrfs_debugfs_test); -#endif - -#endif -} - int __init btrfs_init_sysfs(void) { int ret; @@ -967,8 +941,6 @@ int __init btrfs_init_sysfs(void) if (!btrfs_kset) return -ENOMEM; - btrfs_init_debugfs(); - init_feature_attrs(); ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); if (ret) @@ -989,7 +961,6 @@ int __init btrfs_init_sysfs(void) out_remove_group: sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); out2: - debugfs_remove_recursive(btrfs_debugfs_root_dentry); kset_unregister(btrfs_kset); return ret; @@ -1001,6 +972,5 @@ void __cold btrfs_exit_sysfs(void) &btrfs_static_feature_attr_group); sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); kset_unregister(btrfs_kset); - debugfs_remove_recursive(btrfs_debugfs_root_dentry); } diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 40716b357c1d..4bb4fe96d4bd 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -3,11 +3,6 @@ #ifndef BTRFS_SYSFS_H #define BTRFS_SYSFS_H -/* - * Data exported through sysfs - */ -extern u64 btrfs_debugfs_test; - enum btrfs_feature_set { FEAT_COMPAT, FEAT_COMPAT_RO, From cdf52bd9fe28d0ea93fb9f5d96ae59aec1148b69 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 3 Aug 2019 09:53:16 +0100 Subject: [PATCH 046/138] Btrfs: fix memory leaks in the test test_find_first_clear_extent_bit The test creates an extent io tree and sets several ranges with the CHUNK_ALLOCATED and CHUNK_TRIMMED bits, resulting in the allocation of several extent state structures. However the test never clears those ranges, resulting in memory leaks of the extent state structures. This is detected when CONFIG_BTRFS_DEBUG is set once we remove the btrfs module (rmmod btrfs): [57399.787918] BTRFS: state leak: start 67108864 end 75497471 state 1 in tree 1 refs 1 [57399.790155] BTRFS: state leak: start 33554432 end 67108863 state 33 in tree 1 refs 1 [57399.791941] BTRFS: state leak: start 1048576 end 4194303 state 33 in tree 1 refs 1 [57399.793753] BTRFS: state leak: start 67108864 end 75497471 state 1 in tree 1 refs 1 [57399.795188] BTRFS: state leak: start 33554432 end 67108863 state 33 in tree 1 refs 1 [57399.796453] BTRFS: state leak: start 1048576 end 4194303 state 33 in tree 1 refs 1 [57399.797765] BTRFS: state leak: start 67108864 end 75497471 state 1 in tree 1 refs 1 [57399.799049] BTRFS: state leak: start 33554432 end 67108863 state 33 in tree 1 refs 1 [57399.800142] BTRFS: state leak: start 1048576 end 4194303 state 33 in tree 1 refs 1 [57399.801126] BTRFS: state leak: start 67108864 end 75497471 state 1 in tree 1 refs 1 [57399.802106] BTRFS: state leak: start 33554432 end 67108863 state 33 in tree 1 refs 1 [57399.803119] BTRFS: state leak: start 1048576 end 4194303 state 33 in tree 1 refs 1 [57399.804153] BTRFS: state leak: start 67108864 end 75497471 state 1 in tree 1 refs 1 [57399.805196] BTRFS: state leak: start 33554432 end 67108863 state 33 in tree 1 refs 1 [57399.806191] BTRFS: state leak: start 1048576 end 4194303 state 33 in tree 1 refs 1 The start and end offsets reported correspond exactly to the ranges used by the test. So fix that by clearing all the ranges when the test finishes. 
Fixes: 1eaebb341d2b41 ("btrfs: Don't trim returned range based on input value in find_first_clear_extent_bit") Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/tests/extent-io-tests.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 1bf6b5a79191..705a8a7eb815 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -514,6 +514,8 @@ static int test_find_first_clear_extent_bit(void) "error handling beyond end of range search: start %llu end %llu", start, end); + clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED); + return 0; } From 202f64ef427cce8478d9cfc1a96970db066b1ece Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 5 Aug 2019 10:57:41 +0100 Subject: [PATCH 047/138] Btrfs: make test_find_first_clear_extent_bit fail on incorrect results If any call to find_first_clear_extent_bit() returns an unexpected result, the test should fail and not just print an error message, otherwise it makes detection of regressions much harder to notice. Fixes: 1eaebb341d2b41 ("btrfs: Don't trim returned range based on input value in find_first_clear_extent_bit") Reviewed-by: Nikolay Borisov Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/tests/extent-io-tests.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 705a8a7eb815..123d9a614357 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -438,6 +438,7 @@ static int test_find_first_clear_extent_bit(void) { struct extent_io_tree tree; u64 start, end; + int ret = -EINVAL; test_msg("running find_first_clear_extent_bit test"); extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST, NULL); @@ -452,9 +453,11 @@ static int test_find_first_clear_extent_bit(void) find_first_clear_extent_bit(&tree, SZ_512K, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); - if (start != 0 || end != SZ_1M -1) + if (start != 0 || end != SZ_1M - 1) { test_err("error finding beginning range: start %llu end %llu", start, end); + goto out; + } /* Now add 32M-64M so that we have a hole between 4M-32M */ set_extent_bits(&tree, SZ_32M, SZ_64M - 1, @@ -466,9 +469,11 @@ static int test_find_first_clear_extent_bit(void) find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); - if (start != SZ_4M || end != SZ_32M - 1) + if (start != SZ_4M || end != SZ_32M - 1) { test_err("error finding trimmed range: start %llu end %llu", start, end); + goto out; + } /* * Search in the middle of allocated range, should get the next one @@ -477,9 +482,11 @@ static int test_find_first_clear_extent_bit(void) find_first_clear_extent_bit(&tree, SZ_2M, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); - if (start != SZ_4M || end != SZ_32M -1) + if (start != SZ_4M || end != SZ_32M - 1) { test_err("error finding next unalloc range: start %llu end %llu", start, end); + goto out; + } /* * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag @@ -489,9 +496,11 @@ static int test_find_first_clear_extent_bit(void) find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end, CHUNK_TRIMMED); - if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) + if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { test_err("error finding exact range: start %llu end %llu", start, end); + goto out; + } find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end, 
CHUNK_TRIMMED); @@ -500,23 +509,29 @@ static int test_find_first_clear_extent_bit(void) * Search in the middle of set range whose immediate neighbour doesn't * have the bits set so it must be returned */ - if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) + if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) { test_err("error finding next alloc range: start %llu end %llu", start, end); + goto out; + } /* * Search beyond any known range, shall return after last known range * and end should be -1 */ find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED); - if (start != SZ_64M + SZ_8M || end != -1) + if (start != SZ_64M + SZ_8M || end != -1) { test_err( "error handling beyond end of range search: start %llu end %llu", start, end); + goto out; + } + ret = 0; +out: clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED); - return 0; + return ret; } int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) From 73a3ca20934dd02c0912bcf32463fffec6139399 Mon Sep 17 00:00:00 2001 From: Hans van Kranenburg Date: Sat, 3 Aug 2019 23:36:34 +0200 Subject: [PATCH 048/138] btrfs: clarify btrfs_ioctl_get_dev_stats padding In commit c11d2c236cc26 ("Btrfs: add ioctl to get and reset the device stats") the get_dev_stats ioctl was added. Shortly thereafter, in commit b27f7c0c150f7 ("btrfs: join DEV_STATS ioctls to one") , the flags field was added. However, the calculation for unused padding space was not updated, which also invalidated the comment. Clarify what happened to reduce confusion and wasted time for anyone implementing this. Signed-off-by: Hans van Kranenburg Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/uapi/linux/btrfs.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 7885d79f7515..3ee0678c0a83 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -665,7 +665,12 @@ struct btrfs_ioctl_get_dev_stats { /* out values: */ __u64 values[BTRFS_DEV_STAT_VALUES_MAX]; - __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */ + /* + * This pads the struct to 1032 bytes. It was originally meant to pad to + * 1024 bytes, but when adding the flags field, the padding calculation + * was not adjusted. + */ + __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; }; #define BTRFS_QUOTA_CTL_ENABLE 1 From 73798c465b66ed55d6f0adafbda42aca854acbf1 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 6 Aug 2019 22:05:07 +0800 Subject: [PATCH 049/138] btrfs: qgroup: Try our best to delete qgroup relations When we try to delete qgroups, we're pretty cautious, we make sure both qgroups exist and there is a relationship between them, then try to delete the relation. This behavior is OK, but the problem is we need to two relation items, and if we failed the first item deletion, we error out, leaving the other relation item in qgroup tree. Sometimes the error from del_qgroup_relation_item() could just be -ENOENT, thus we can ignore that error and continue without any problem. Further more, such cautious behavior makes qgroup relation deletion impossible for orphan relation items. This patch will enhance __del_qgroup_relation(): - If both qgroups and their relation items exist Go the regular deletion routine and update their accounting if needed. - If any qgroup or relation item doesn't exist Then we still try to delete the orphan items anyway, but don't trigger the accounting update. 
By this, we try our best to remove relation items, and can handle orphan relation items properly, while still keep the existing behavior for good qgroup tree. Reported-by: Andrei Borzenkov Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 48 +++++++++++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index a960e33525ba..8d3bd799ac7d 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1312,8 +1312,9 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, struct btrfs_qgroup *member; struct btrfs_qgroup_list *list; struct ulist *tmp; + bool found = false; int ret = 0; - int err; + int ret2; tmp = ulist_alloc(GFP_KERNEL); if (!tmp) @@ -1327,28 +1328,39 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, member = find_qgroup_rb(fs_info, src); parent = find_qgroup_rb(fs_info, dst); - if (!member || !parent) { - ret = -EINVAL; - goto out; - } + /* + * The parent/member pair doesn't exist, then try to delete the dead + * relation items only. + */ + if (!member || !parent) + goto delete_item; /* check if such qgroup relation exist firstly */ list_for_each_entry(list, &member->groups, next_group) { - if (list->group == parent) - goto exist; + if (list->group == parent) { + found = true; + break; + } } - ret = -ENOENT; - goto out; -exist: - ret = del_qgroup_relation_item(trans, src, dst); - err = del_qgroup_relation_item(trans, dst, src); - if (err && !ret) - ret = err; - spin_lock(&fs_info->qgroup_lock); - del_relation_rb(fs_info, src, dst); - ret = quick_update_accounting(fs_info, tmp, src, dst, -1); - spin_unlock(&fs_info->qgroup_lock); +delete_item: + ret = del_qgroup_relation_item(trans, src, dst); + if (ret < 0 && ret != -ENOENT) + goto out; + ret2 = del_qgroup_relation_item(trans, dst, src); + if (ret2 < 0 && ret2 != -ENOENT) + goto out; + + /* At least one deletion succeeded, return 0 */ + if (!ret || !ret2) + ret = 0; + + if (found) { + spin_lock(&fs_info->qgroup_lock); + del_relation_rb(fs_info, src, dst); + ret = quick_update_accounting(fs_info, tmp, src, dst, -1); + spin_unlock(&fs_info->qgroup_lock); + } out: ulist_free(tmp); return ret; From 4e411a7d044168520d936264c9fe40847cf031eb Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 7 Aug 2019 16:21:19 +0800 Subject: [PATCH 050/138] btrfs: reset device stat using btrfs_dev_stat_set btrfs_dev_stat_reset() is an overdo in terms of wrapping. So this patch open codes btrfs_dev_stat_reset(). 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 6 +++--- fs/btrfs/volumes.h | 6 ------ 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index cb9dcdffe434..a9920bce7bcd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7303,7 +7303,7 @@ static void __btrfs_reset_dev_stats(struct btrfs_device *dev) int i; for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) - btrfs_dev_stat_reset(dev, i); + btrfs_dev_stat_set(dev, i, 0); } int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) @@ -7353,7 +7353,7 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) btrfs_dev_stat_set(device, i, btrfs_dev_stats_value(eb, ptr, i)); else - btrfs_dev_stat_reset(device, i); + btrfs_dev_stat_set(device, i, 0); } device->dev_stats_valid = 1; @@ -7536,7 +7536,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, stats->values[i] = btrfs_dev_stat_read_and_reset(dev, i); else - btrfs_dev_stat_reset(dev, i); + btrfs_dev_stat_set(dev, i, 0); } } else { for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 081cb734a239..a7da1f3e3627 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -547,12 +547,6 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev, atomic_inc(&dev->dev_stats_ccnt); } -static inline void btrfs_dev_stat_reset(struct btrfs_device *dev, - int index) -{ - btrfs_dev_stat_set(dev, index, 0); -} - /* * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which * can be used as index to access btrfs_raid_array[]. From ae4b9b4c7d546b1cabf9e5da67432cfbc7cd7148 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 7 Aug 2019 16:21:20 +0800 Subject: [PATCH 051/138] btrfs: opencode reset of all device stats __btrfs_reset_dev_stats() is a small helper function to reset devices stat values, and is used only once, instead just open code it. 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a9920bce7bcd..8031b47cb380 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -191,7 +191,6 @@ out_overflow:; static int init_first_rw_device(struct btrfs_trans_handle *trans); static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); -static void __btrfs_reset_dev_stats(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); static int __btrfs_map_block(struct btrfs_fs_info *fs_info, @@ -7298,14 +7297,6 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) } } -static void __btrfs_reset_dev_stats(struct btrfs_device *dev) -{ - int i; - - for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) - btrfs_dev_stat_set(dev, i, 0); -} - int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) { struct btrfs_key key; @@ -7335,7 +7326,8 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) key.offset = device->devid; ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); if (ret) { - __btrfs_reset_dev_stats(device); + for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) + btrfs_dev_stat_set(device, i, 0); device->dev_stats_valid = 1; btrfs_release_path(path); continue; From 89439109bc2be5f19c0955d392fb6ea7d0f4ecb3 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 17:34:41 +0200 Subject: [PATCH 052/138] btrfs: move sysfs declarations out of ctree.h As the header for sysfs code already exists, use it to clean up ctree.h. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 12 ------------ fs/btrfs/super.c | 1 + fs/btrfs/sysfs.h | 11 +++++++++++ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b95d7472b2a9..c40f28f175c8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -398,12 +398,6 @@ struct btrfs_dev_replace { wait_queue_head_t replace_wait; }; -/* For raid type sysfs entries */ -struct raid_kobject { - u64 flags; - struct kobject kobj; -}; - /* * free clusters are used to claim free space in relatively large chunks, * allowing us to do less seeky writes. 
They are used for all metadata @@ -3096,12 +3090,6 @@ loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in, int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, struct btrfs_root *root); -/* sysfs.c */ -int __init btrfs_init_sysfs(void); -void __cold btrfs_exit_sysfs(void); -int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); -void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); - /* super.c */ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, unsigned long new_flags); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 16c7af333d3a..1b151af25772 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -43,6 +43,7 @@ #include "free-space-cache.h" #include "backref.h" #include "space-info.h" +#include "sysfs.h" #include "tests/btrfs-tests.h" #include "block-group.h" diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 4bb4fe96d4bd..4f622989b594 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -35,6 +35,12 @@ struct btrfs_feature_attr { u64 feature_bit; }; +/* For raid type sysfs entries */ +struct raid_kobject { + u64 flags; + struct kobject kobj; +}; + #define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \ static struct btrfs_feature_attr btrfs_attr_features_##_name = { \ .kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \ @@ -86,4 +92,9 @@ void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info, u64 bit, enum btrfs_feature_set set); +int __init btrfs_init_sysfs(void); +void __cold btrfs_exit_sysfs(void); +int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); +void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); + #endif From 32a9991f15a0fc2a55de47db7d22a99f462d6804 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 17:49:55 +0200 Subject: [PATCH 053/138] btrfs: factor sysfs code out of link_block_group The part of link_block_group that just creates the sysfs object is independent and can be factored out to a helper. Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 37 ++------------------------------- fs/btrfs/sysfs.c | 46 ++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/sysfs.h | 1 + 3 files changed, 49 insertions(+), 35 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3348f9a4e15c..37e56b0c0484 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include @@ -7573,7 +7572,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) static void link_block_group(struct btrfs_block_group_cache *cache) { struct btrfs_space_info *space_info = cache->space_info; - struct btrfs_fs_info *fs_info = cache->fs_info; int index = btrfs_bg_flags_to_raid_index(cache->flags); bool first = false; @@ -7583,39 +7581,8 @@ static void link_block_group(struct btrfs_block_group_cache *cache) list_add_tail(&cache->list, &space_info->block_groups[index]); up_write(&space_info->groups_sem); - if (first) { - struct raid_kobject *rkobj; - unsigned int nofs_flag; - int ret; - - /* - * Setup a NOFS context because kobject_add(), deep in its call - * chain, does GFP_KERNEL allocations, and we are often called - * in a context where if reclaim is triggered we can deadlock - * (we are either holding a transaction handle or some lock - * required for a transaction commit). 
- */ - nofs_flag = memalloc_nofs_save(); - rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL); - if (!rkobj) { - memalloc_nofs_restore(nofs_flag); - btrfs_warn(cache->fs_info, - "couldn't alloc memory for raid level kobject"); - return; - } - rkobj->flags = cache->flags; - kobject_init(&rkobj->kobj, &btrfs_raid_ktype); - ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s", - btrfs_bg_type_to_raid_name(rkobj->flags)); - memalloc_nofs_restore(nofs_flag); - if (ret) { - kobject_put(&rkobj->kobj); - btrfs_warn(fs_info, - "failed to add kobject for block cache, ignoring"); - return; - } - space_info->block_group_kobjs[index] = &rkobj->kobj; - } + if (first) + btrfs_sysfs_add_block_group_type(cache); } static struct btrfs_block_group_cache * diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 115499cd6b79..50cb9f9cdbfd 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -749,6 +750,51 @@ static void init_feature_attrs(void) } } +/* + * Create a sysfs entry for a given block group type at path + * /sys/fs/btrfs/UUID/allocation/data/TYPE + */ +void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache) +{ + struct btrfs_fs_info *fs_info = cache->fs_info; + struct btrfs_space_info *space_info = cache->space_info; + struct raid_kobject *rkobj; + const int index = btrfs_bg_flags_to_raid_index(cache->flags); + unsigned int nofs_flag; + int ret; + + /* + * Setup a NOFS context because kobject_add(), deep in its call chain, + * does GFP_KERNEL allocations, and we are often called in a context + * where if reclaim is triggered we can deadlock (we are either holding + * a transaction handle or some lock required for a transaction + * commit). + */ + nofs_flag = memalloc_nofs_save(); + + rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); + if (!rkobj) { + memalloc_nofs_restore(nofs_flag); + btrfs_warn(cache->fs_info, + "couldn't alloc memory for raid level kobject"); + return; + } + + rkobj->flags = cache->flags; + kobject_init(&rkobj->kobj, &btrfs_raid_ktype); + ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s", + btrfs_bg_type_to_raid_name(rkobj->flags)); + memalloc_nofs_restore(nofs_flag); + if (ret) { + kobject_put(&rkobj->kobj); + btrfs_warn(fs_info, + "failed to add kobject for block cache, ignoring"); + return; + } + + space_info->block_group_kobjs[index] = &rkobj->kobj; +} + /* when one_device is NULL, it removes all device links */ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 4f622989b594..403240e81dd2 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -96,5 +96,6 @@ int __init btrfs_init_sysfs(void); void __cold btrfs_exit_sysfs(void); int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); +void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache); #endif From 536ea45cba34618669a2f984fa7dff86480823fb Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 17:55:55 +0200 Subject: [PATCH 054/138] btrfs: sysfs: unexport btrfs_raid_ktype The last non-sysfs usage of btrfs_raid_ktype has been moved to a private helper in previous patch so the variable can be made static. 
Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 2 +- fs/btrfs/sysfs.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 50cb9f9cdbfd..f581ceedf5a9 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -336,7 +336,7 @@ static void release_raid_kobj(struct kobject *kobj) kfree(to_raid_kobj(kobj)); } -struct kobj_type btrfs_raid_ktype = { +static struct kobj_type btrfs_raid_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = release_raid_kobj, .default_groups = raid_groups, diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 403240e81dd2..a359f0b7c20b 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -80,7 +80,6 @@ attr_to_btrfs_feature_attr(struct attribute *attr) char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags); extern const char * const btrfs_feature_set_names[FEAT_MAX]; extern struct kobj_type space_info_ktype; -extern struct kobj_type btrfs_raid_ktype; int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device); int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, From b882327a774041d5710e37da3286e25278a1947f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 18:50:16 +0200 Subject: [PATCH 055/138] btrfs: factor out sysfs code for creating space infos Move creation of data/metadata/system space info directories to sysfs.c. Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 25 ++----------------------- fs/btrfs/sysfs.c | 37 +++++++++++++++++++++++++++++++++++++ fs/btrfs/sysfs.h | 2 ++ 3 files changed, 41 insertions(+), 23 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 9dbb9c5f82b1..e9406b2133d1 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -34,23 +34,6 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info) rcu_read_unlock(); } -static const char *alloc_name(u64 flags) -{ - switch (flags) { - case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA: - return "mixed"; - case BTRFS_BLOCK_GROUP_METADATA: - return "metadata"; - case BTRFS_BLOCK_GROUP_DATA: - return "data"; - case BTRFS_BLOCK_GROUP_SYSTEM: - return "system"; - default: - WARN_ON(1); - return "invalid-combination"; - }; -} - static int create_space_info(struct btrfs_fs_info *info, u64 flags) { @@ -80,13 +63,9 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags) INIT_LIST_HEAD(&space_info->tickets); INIT_LIST_HEAD(&space_info->priority_tickets); - ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype, - info->space_info_kobj, "%s", - alloc_name(space_info->flags)); - if (ret) { - kobject_put(&space_info->kobj); + ret = btrfs_sysfs_add_space_info_type(info, space_info); + if (ret) return ret; - } list_add_rcu(&space_info->list, &info->space_info); if (flags & BTRFS_BLOCK_GROUP_DATA) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index f581ceedf5a9..d982730265b9 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -795,6 +795,43 @@ void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache) space_info->block_group_kobjs[index] = &rkobj->kobj; } +static const char *alloc_name(u64 flags) +{ + switch (flags) { + case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA: + return "mixed"; + case BTRFS_BLOCK_GROUP_METADATA: + return "metadata"; + case BTRFS_BLOCK_GROUP_DATA: + return "data"; + case BTRFS_BLOCK_GROUP_SYSTEM: + return "system"; + default: + WARN_ON(1); + return "invalid-combination"; + }; +} + +/* + * Create a sysfs entry for a space info type at 
path + * /sys/fs/btrfs/UUID/allocation/TYPE + */ +int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info) +{ + int ret; + + ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype, + fs_info->space_info_kobj, "%s", + alloc_name(space_info->flags)); + if (ret) { + kobject_put(&space_info->kobj); + return ret; + } + + return 0; +} + /* when one_device is NULL, it removes all device links */ int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index a359f0b7c20b..6baf9ff0519b 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -96,5 +96,7 @@ void __cold btrfs_exit_sysfs(void); int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache); +int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info); #endif From 27992d014554c37aae1cf788fbf6c457b3eceb76 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 17:55:55 +0200 Subject: [PATCH 056/138] btrfs: sysfs: unexport space_info_ktype The last non-sysfs usage of space_info_ktype has been moved to a private helper in previous patch so the variable can be made static. Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 2 +- fs/btrfs/sysfs.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index d982730265b9..bc82ab66ba8f 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -395,7 +395,7 @@ static void space_info_release(struct kobject *kobj) kfree(sinfo); } -struct kobj_type space_info_ktype = { +static struct kobj_type space_info_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = space_info_release, .default_groups = space_info_groups, diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 6baf9ff0519b..0c06f2adfae8 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -79,7 +79,6 @@ attr_to_btrfs_feature_attr(struct attribute *attr) char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags); extern const char * const btrfs_feature_set_names[FEAT_MAX]; -extern struct kobj_type space_info_ktype; int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device); int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, From f10152bcc962cae6e78ae4eecfa22270f481c0ff Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 19:07:55 +0200 Subject: [PATCH 057/138] btrfs: sysfs: replace direct access to feature set names with a helper In order to unexport the feature type array, add a helper for the enum-to-string conversion. 
Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 2 +- fs/btrfs/sysfs.c | 7 ++++++- fs/btrfs/sysfs.h | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b431f7877e88..4eabd419aaca 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -5259,7 +5259,7 @@ static int check_feature_bits(struct btrfs_fs_info *fs_info, u64 change_mask, u64 flags, u64 supported_flags, u64 safe_set, u64 safe_clear) { - const char *type = btrfs_feature_set_names[set]; + const char *type = btrfs_feature_set_name(set); char *names; u64 disallowed, unsupported; u64 set_mask = flags & change_mask; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index bc82ab66ba8f..c7c29e5b38a0 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -675,12 +675,17 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) btrfs_sysfs_rm_device_link(fs_info->fs_devices, NULL); } -const char * const btrfs_feature_set_names[FEAT_MAX] = { +static const char * const btrfs_feature_set_names[FEAT_MAX] = { [FEAT_COMPAT] = "compat", [FEAT_COMPAT_RO] = "compat_ro", [FEAT_INCOMPAT] = "incompat", }; +const char * const btrfs_feature_set_name(enum btrfs_feature_set set) +{ + return btrfs_feature_set_names[set]; +} + char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags) { size_t bufsize = 4096; /* safe max, 64 names * 64 bytes */ diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 0c06f2adfae8..d8a6ab1bfde7 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -78,7 +78,7 @@ attr_to_btrfs_feature_attr(struct attribute *attr) } char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags); -extern const char * const btrfs_feature_set_names[FEAT_MAX]; +const char * const btrfs_feature_set_name(enum btrfs_feature_set set); int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, struct btrfs_device *one_device); int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices, From 5b28692e0c4ffb7266d359f0d54155156cdfe0a2 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 18:50:16 +0200 Subject: [PATCH 058/138] btrfs: factor out sysfs code for sending device uevent The device uevent belongs to the sysfs API. 
Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 11 +++++++++++ fs/btrfs/sysfs.h | 1 + fs/btrfs/volumes.c | 13 ------------- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index c7c29e5b38a0..5562ce1f7090 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -913,6 +913,17 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, return error; } +void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action) +{ + int ret; + + ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); + if (ret) + pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n", + action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), + &disk_to_dev(bdev->bd_disk)->kobj); +} + /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index d8a6ab1bfde7..6807d105c027 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -89,6 +89,7 @@ int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info, u64 bit, enum btrfs_feature_set set); +void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action); int __init btrfs_init_sysfs(void); void __cold btrfs_exit_sysfs(void); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8031b47cb380..830e4b0b5957 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -358,19 +358,6 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices) kfree(fs_devices); } -static void btrfs_kobject_uevent(struct block_device *bdev, - enum kobject_action action) -{ - int ret; - - ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); - if (ret) - pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n", - action, - kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), - &disk_to_dev(bdev->bd_disk)->kobj); -} - void __exit btrfs_cleanup_fs_uuids(void) { struct btrfs_fs_devices *fs_devices; From b5865babb7b44308f0d0ece39756d55ef7628742 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 18:50:16 +0200 Subject: [PATCH 059/138] btrfs: factor out sysfs code for deleting block group and space infos The helpers to create block group and space info directories already live in sysfs.c, move the deletion part there too. 
Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 14 +------------- fs/btrfs/sysfs.c | 22 ++++++++++++++++++++++ fs/btrfs/sysfs.h | 1 + 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 37e56b0c0484..05f6464b3123 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7539,8 +7539,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) btrfs_release_global_block_rsv(info); while (!list_empty(&info->space_info)) { - int i; - space_info = list_entry(info->space_info.next, struct btrfs_space_info, list); @@ -7554,17 +7552,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) space_info->bytes_may_use > 0)) btrfs_dump_space_info(info, space_info, 0, 0); list_del(&space_info->list); - for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { - struct kobject *kobj; - kobj = space_info->block_group_kobjs[i]; - space_info->block_group_kobjs[i] = NULL; - if (kobj) { - kobject_del(kobj); - kobject_put(kobj); - } - } - kobject_del(&space_info->kobj); - kobject_put(&space_info->kobj); + btrfs_sysfs_remove_space_info(space_info); } return 0; } diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 5562ce1f7090..4de9bae3e186 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -800,6 +800,28 @@ void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache) space_info->block_group_kobjs[index] = &rkobj->kobj; } +/* + * Remove sysfs directories for all block group types of a given space info and + * the space info as well + */ +void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info) +{ + int i; + + for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { + struct kobject *kobj; + + kobj = space_info->block_group_kobjs[i]; + space_info->block_group_kobjs[i] = NULL; + if (kobj) { + kobject_del(kobj); + kobject_put(kobj); + } + } + kobject_del(&space_info->kobj); + kobject_put(&space_info->kobj); +} + static const char *alloc_name(u64 flags) { switch (flags) { diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 6807d105c027..031697358b6e 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -98,5 +98,6 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache); int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); +void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info); #endif From f93c39970b1da007b3110fd7c9dd361614922457 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 18:50:16 +0200 Subject: [PATCH 060/138] btrfs: factor out sysfs code for updating sprout fsid Wrap the fsid renaming code and move it to sysfs.c. 
Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 15 +++++++++++++++ fs/btrfs/sysfs.h | 2 ++ fs/btrfs/volumes.c | 12 ++---------- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4de9bae3e186..9078e377ba7b 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -946,6 +946,21 @@ void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action) &disk_to_dev(bdev->bd_disk)->kobj); } +void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices, + const u8 *fsid) +{ + char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; + + /* + * Sprouting changes fsid of the mounted filesystem, rename the fsid + * directory + */ + snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", fsid); + if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf)) + btrfs_warn(fs_devices->fs_info, + "sysfs: failed to create fsid for sprout"); +} + /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 031697358b6e..4243fcf607b0 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -87,6 +87,8 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs, struct kobject *parent); int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs); void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); +void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices, + const u8 *fsid); void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info, u64 bit, enum btrfs_feature_set set); void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 830e4b0b5957..fa6eb9e0ba89 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2680,22 +2680,14 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path } if (seeding_dev) { - char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; - ret = btrfs_finish_sprout(trans); if (ret) { btrfs_abort_transaction(trans, ret); goto error_sysfs; } - /* Sprouting would change fsid of the mounted root, - * so rename the fsid on the sysfs - */ - snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", - fs_info->fs_devices->fsid); - if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf)) - btrfs_warn(fs_info, - "sysfs: failed to create fsid for sprout"); + btrfs_sysfs_update_sprout_fsid(fs_devices, + fs_info->fs_devices->fsid); } ret = btrfs_commit_transaction(trans); From 67715b206c397b28b8a41c9ddbdc1776a0e7a25f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 1 Aug 2019 19:46:20 +0200 Subject: [PATCH 061/138] btrfs: cleanup kobject.h includes The kobject should be pulled in via sysfs.h and that needs to include it because it needs various definitions like kobj_attribute or kobject. 
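The principle here is that headers should be self-contained: a header that declares types or macros built around struct kobj_attribute or struct kobject must include <linux/kobject.h> itself, rather than relying on every includer having pulled it in earlier. A small illustration with a made-up header (not from the patch):

#ifndef EXAMPLE_SYSFS_H
#define EXAMPLE_SYSFS_H

#include <linux/kobject.h>	/* struct kobject, struct kobj_attribute */
#include <linux/types.h>	/* u64 */

struct example_feature_attr {
	struct kobj_attribute kobj_attr;	/* needs the full definition, not a forward declaration */
	u64 feature_bit;
};

#endif

Once sysfs.h is self-contained in this way, the explicit kobject.h includes in ctree.h and sysfs.c become redundant, which is what the hunks below remove.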
Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 - fs/btrfs/sysfs.c | 1 - fs/btrfs/sysfs.h | 2 ++ 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index c40f28f175c8..fc5031e6fd23 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 9078e377ba7b..85cf3bfc9f62 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include "ctree.h" diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 4243fcf607b0..611e103c174b 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -3,6 +3,8 @@ #ifndef BTRFS_SYSFS_H #define BTRFS_SYSFS_H +#include + enum btrfs_feature_set { FEAT_COMPAT, FEAT_COMPAT_RO, From 8f52316c271a572ef602e0b463d9d60d0f40276c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Aug 2019 13:07:38 +0200 Subject: [PATCH 062/138] btrfs: sysfs: move type conversion helpers to sysfs.c Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 16 ++++++++++++++++ fs/btrfs/sysfs.h | 18 ------------------ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 85cf3bfc9f62..89781d941645 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -21,6 +21,22 @@ static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj); static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj); +static struct btrfs_feature_attr *to_btrfs_feature_attr(struct kobj_attribute *a) +{ + return container_of(a, struct btrfs_feature_attr, kobj_attr); +} + +static struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr) +{ + return container_of(attr, struct kobj_attribute, attr); +} + +static struct btrfs_feature_attr *attr_to_btrfs_feature_attr( + struct attribute *attr) +{ + return to_btrfs_feature_attr(attr_to_btrfs_attr(attr)); +} + static u64 get_features(struct btrfs_fs_info *fs_info, enum btrfs_feature_set set) { diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 611e103c174b..eaa37bd4013a 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -61,24 +61,6 @@ static struct btrfs_feature_attr btrfs_attr_features_##_name = { \ #define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \ BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature) -/* convert from attribute */ -static inline struct btrfs_feature_attr * -to_btrfs_feature_attr(struct kobj_attribute *a) -{ - return container_of(a, struct btrfs_feature_attr, kobj_attr); -} - -static inline struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr) -{ - return container_of(attr, struct kobj_attribute, attr); -} - -static inline struct btrfs_feature_attr * -attr_to_btrfs_feature_attr(struct attribute *attr) -{ - return to_btrfs_feature_attr(attr_to_btrfs_attr(attr)); -} - char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags); const char * const btrfs_feature_set_name(enum btrfs_feature_set set); int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, From 9188db611dbb6b3f68c9108265d2abd1cb5a3b44 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Aug 2019 12:52:48 +0200 Subject: [PATCH 063/138] btrfs: sysfs: move helper macros to sysfs.c None of the macros is used outside of sysfs.c. 
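To make clear what is being moved, these macros do nothing more than stamp out statically initialized attribute objects. Going by the definitions in the hunk below, an invocation such as BTRFS_ATTR(, label, btrfs_label_show) (the show callback name is only illustrative) expands to roughly:

static struct kobj_attribute btrfs_attr__label = {
	.attr	= { .name = "label", .mode = 0444 },
	.show	= btrfs_label_show,
	.store	= NULL,
};

Because each expansion defines a file-local static object, the macros are only of use to the translation unit that instantiates the attributes, which in practice is sysfs.c alone.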
Signed-off-by: David Sterba --- fs/btrfs/sysfs.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/sysfs.h | 49 ------------------------------------------------ 2 files changed, 48 insertions(+), 49 deletions(-) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 89781d941645..f6d3c80f2e28 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -18,6 +18,54 @@ #include "space-info.h" #include "block-group.h" +struct btrfs_feature_attr { + struct kobj_attribute kobj_attr; + enum btrfs_feature_set feature_set; + u64 feature_bit; +}; + +/* For raid type sysfs entries */ +struct raid_kobject { + u64 flags; + struct kobject kobj; +}; + +#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \ +{ \ + .attr = { .name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +} + +#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \ + static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ + __INIT_KOBJ_ATTR(_name, 0644, _show, _store) + +#define BTRFS_ATTR(_prefix, _name, _show) \ + static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ + __INIT_KOBJ_ATTR(_name, 0444, _show, NULL) + +#define BTRFS_ATTR_PTR(_prefix, _name) \ + (&btrfs_attr_##_prefix##_##_name.attr) + +#define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \ +static struct btrfs_feature_attr btrfs_attr_features_##_name = { \ + .kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \ + btrfs_feature_attr_show, \ + btrfs_feature_attr_store), \ + .feature_set = _feature_set, \ + .feature_bit = _feature_prefix ##_## _feature_bit, \ +} +#define BTRFS_FEAT_ATTR_PTR(_name) \ + (&btrfs_attr_features_##_name.kobj_attr.attr) + +#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \ + BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature) +#define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \ + BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature) +#define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \ + BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature) + static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj); static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj); diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index eaa37bd4013a..610e9c36a94c 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h @@ -12,55 +12,6 @@ enum btrfs_feature_set { FEAT_MAX }; -#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \ -{ \ - .attr = { .name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - .store = _store, \ -} - -#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \ - static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ - __INIT_KOBJ_ATTR(_name, 0644, _show, _store) - -#define BTRFS_ATTR(_prefix, _name, _show) \ - static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ - __INIT_KOBJ_ATTR(_name, 0444, _show, NULL) - -#define BTRFS_ATTR_PTR(_prefix, _name) \ - (&btrfs_attr_##_prefix##_##_name.attr) - - -struct btrfs_feature_attr { - struct kobj_attribute kobj_attr; - enum btrfs_feature_set feature_set; - u64 feature_bit; -}; - -/* For raid type sysfs entries */ -struct raid_kobject { - u64 flags; - struct kobject kobj; -}; - -#define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \ -static struct btrfs_feature_attr btrfs_attr_features_##_name = { \ - .kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \ - btrfs_feature_attr_show, \ - btrfs_feature_attr_store), \ - .feature_set = _feature_set, \ - .feature_bit = _feature_prefix ##_## _feature_bit, \ -} -#define 
BTRFS_FEAT_ATTR_PTR(_name) \ - (&btrfs_attr_features_##_name.kobj_attr.attr) - -#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \ - BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature) -#define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \ - BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature) -#define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \ - BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature) - char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags); const char * const btrfs_feature_set_name(enum btrfs_feature_set set); int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, From 9f21246d8c7efb940b96098cb556bfe86205fbed Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 6 Aug 2019 16:43:19 +0200 Subject: [PATCH 064/138] btrfs: migrate the block group caching code We can now just copy it over to block-group.c. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 460 +++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 2 + fs/btrfs/ctree.h | 2 - fs/btrfs/extent-tree.c | 459 ---------------------------------------- 4 files changed, 462 insertions(+), 461 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 1f3afa0b42ba..643a2f16603b 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -3,6 +3,9 @@ #include "ctree.h" #include "block-group.h" #include "space-info.h" +#include "disk-io.h" +#include "free-space-cache.h" +#include "free-space-tree.h" void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { @@ -200,3 +203,460 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); } + +struct btrfs_caching_control *btrfs_get_caching_control( + struct btrfs_block_group_cache *cache) +{ + struct btrfs_caching_control *ctl; + + spin_lock(&cache->lock); + if (!cache->caching_ctl) { + spin_unlock(&cache->lock); + return NULL; + } + + ctl = cache->caching_ctl; + refcount_inc(&ctl->count); + spin_unlock(&cache->lock); + return ctl; +} + +void btrfs_put_caching_control(struct btrfs_caching_control *ctl) +{ + if (refcount_dec_and_test(&ctl->count)) + kfree(ctl); +} + +/* + * When we wait for progress in the block group caching, its because our + * allocation attempt failed at least once. So, we must sleep and let some + * progress happen before we try again. + * + * This function will sleep at least once waiting for new free space to show + * up, and then it will check the block group free space numbers for our min + * num_bytes. Another option is to have it go ahead and look in the rbtree for + * a free extent of a given size, but this is a good start. + * + * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using + * any of the information in this block group. 
+ */ +void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, + u64 num_bytes) +{ + struct btrfs_caching_control *caching_ctl; + + caching_ctl = btrfs_get_caching_control(cache); + if (!caching_ctl) + return; + + wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || + (cache->free_space_ctl->free_space >= num_bytes)); + + btrfs_put_caching_control(caching_ctl); +} + +int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) +{ + struct btrfs_caching_control *caching_ctl; + int ret = 0; + + caching_ctl = btrfs_get_caching_control(cache); + if (!caching_ctl) + return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; + + wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); + if (cache->cached == BTRFS_CACHE_ERROR) + ret = -EIO; + btrfs_put_caching_control(caching_ctl); + return ret; +} + +#ifdef CONFIG_BTRFS_DEBUG +void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_fs_info *fs_info = block_group->fs_info; + u64 start = block_group->key.objectid; + u64 len = block_group->key.offset; + u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? + fs_info->nodesize : fs_info->sectorsize; + u64 step = chunk << 1; + + while (len > chunk) { + btrfs_remove_free_space(block_group, start, chunk); + start += step; + if (len < step) + len = 0; + else + len -= step; + } +} +#endif + +/* + * This is only called by btrfs_cache_block_group, since we could have freed + * extents we need to check the pinned_extents for any extents that can't be + * used yet since their free space will be released as soon as the transaction + * commits. + */ +u64 add_new_free_space(struct btrfs_block_group_cache *block_group, + u64 start, u64 end) +{ + struct btrfs_fs_info *info = block_group->fs_info; + u64 extent_start, extent_end, size, total_added = 0; + int ret; + + while (start < end) { + ret = find_first_extent_bit(info->pinned_extents, start, + &extent_start, &extent_end, + EXTENT_DIRTY | EXTENT_UPTODATE, + NULL); + if (ret) + break; + + if (extent_start <= start) { + start = extent_end + 1; + } else if (extent_start > start && extent_start < end) { + size = extent_start - start; + total_added += size; + ret = btrfs_add_free_space(block_group, start, + size); + BUG_ON(ret); /* -ENOMEM or logic error */ + start = extent_end + 1; + } else { + break; + } + } + + if (start < end) { + size = end - start; + total_added += size; + ret = btrfs_add_free_space(block_group, start, size); + BUG_ON(ret); /* -ENOMEM or logic error */ + } + + return total_added; +} + +static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) +{ + struct btrfs_block_group_cache *block_group = caching_ctl->block_group; + struct btrfs_fs_info *fs_info = block_group->fs_info; + struct btrfs_root *extent_root = fs_info->extent_root; + struct btrfs_path *path; + struct extent_buffer *leaf; + struct btrfs_key key; + u64 total_found = 0; + u64 last = 0; + u32 nritems; + int ret; + bool wakeup = true; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); + +#ifdef CONFIG_BTRFS_DEBUG + /* + * If we're fragmenting we don't want to make anybody think we can + * allocate from this block group until we've had a chance to fragment + * the free space. 
+ */ + if (btrfs_should_fragment_free_space(block_group)) + wakeup = false; +#endif + /* + * We don't want to deadlock with somebody trying to allocate a new + * extent for the extent root while also trying to search the extent + * root to add free space. So we skip locking and search the commit + * root, since its read-only + */ + path->skip_locking = 1; + path->search_commit_root = 1; + path->reada = READA_FORWARD; + + key.objectid = last; + key.offset = 0; + key.type = BTRFS_EXTENT_ITEM_KEY; + +next: + ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); + if (ret < 0) + goto out; + + leaf = path->nodes[0]; + nritems = btrfs_header_nritems(leaf); + + while (1) { + if (btrfs_fs_closing(fs_info) > 1) { + last = (u64)-1; + break; + } + + if (path->slots[0] < nritems) { + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + } else { + ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); + if (ret) + break; + + if (need_resched() || + rwsem_is_contended(&fs_info->commit_root_sem)) { + if (wakeup) + caching_ctl->progress = last; + btrfs_release_path(path); + up_read(&fs_info->commit_root_sem); + mutex_unlock(&caching_ctl->mutex); + cond_resched(); + mutex_lock(&caching_ctl->mutex); + down_read(&fs_info->commit_root_sem); + goto next; + } + + ret = btrfs_next_leaf(extent_root, path); + if (ret < 0) + goto out; + if (ret) + break; + leaf = path->nodes[0]; + nritems = btrfs_header_nritems(leaf); + continue; + } + + if (key.objectid < last) { + key.objectid = last; + key.offset = 0; + key.type = BTRFS_EXTENT_ITEM_KEY; + + if (wakeup) + caching_ctl->progress = last; + btrfs_release_path(path); + goto next; + } + + if (key.objectid < block_group->key.objectid) { + path->slots[0]++; + continue; + } + + if (key.objectid >= block_group->key.objectid + + block_group->key.offset) + break; + + if (key.type == BTRFS_EXTENT_ITEM_KEY || + key.type == BTRFS_METADATA_ITEM_KEY) { + total_found += add_new_free_space(block_group, last, + key.objectid); + if (key.type == BTRFS_METADATA_ITEM_KEY) + last = key.objectid + + fs_info->nodesize; + else + last = key.objectid + key.offset; + + if (total_found > CACHING_CTL_WAKE_UP) { + total_found = 0; + if (wakeup) + wake_up(&caching_ctl->wait); + } + } + path->slots[0]++; + } + ret = 0; + + total_found += add_new_free_space(block_group, last, + block_group->key.objectid + + block_group->key.offset); + caching_ctl->progress = (u64)-1; + +out: + btrfs_free_path(path); + return ret; +} + +static noinline void caching_thread(struct btrfs_work *work) +{ + struct btrfs_block_group_cache *block_group; + struct btrfs_fs_info *fs_info; + struct btrfs_caching_control *caching_ctl; + int ret; + + caching_ctl = container_of(work, struct btrfs_caching_control, work); + block_group = caching_ctl->block_group; + fs_info = block_group->fs_info; + + mutex_lock(&caching_ctl->mutex); + down_read(&fs_info->commit_root_sem); + + if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) + ret = load_free_space_tree(caching_ctl); + else + ret = load_extent_tree_free(caching_ctl); + + spin_lock(&block_group->lock); + block_group->caching_ctl = NULL; + block_group->cached = ret ? 
BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; + spin_unlock(&block_group->lock); + +#ifdef CONFIG_BTRFS_DEBUG + if (btrfs_should_fragment_free_space(block_group)) { + u64 bytes_used; + + spin_lock(&block_group->space_info->lock); + spin_lock(&block_group->lock); + bytes_used = block_group->key.offset - + btrfs_block_group_used(&block_group->item); + block_group->space_info->bytes_used += bytes_used >> 1; + spin_unlock(&block_group->lock); + spin_unlock(&block_group->space_info->lock); + btrfs_fragment_free_space(block_group); + } +#endif + + caching_ctl->progress = (u64)-1; + + up_read(&fs_info->commit_root_sem); + btrfs_free_excluded_extents(block_group); + mutex_unlock(&caching_ctl->mutex); + + wake_up(&caching_ctl->wait); + + btrfs_put_caching_control(caching_ctl); + btrfs_put_block_group(block_group); +} + +int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, + int load_cache_only) +{ + DEFINE_WAIT(wait); + struct btrfs_fs_info *fs_info = cache->fs_info; + struct btrfs_caching_control *caching_ctl; + int ret = 0; + + caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); + if (!caching_ctl) + return -ENOMEM; + + INIT_LIST_HEAD(&caching_ctl->list); + mutex_init(&caching_ctl->mutex); + init_waitqueue_head(&caching_ctl->wait); + caching_ctl->block_group = cache; + caching_ctl->progress = cache->key.objectid; + refcount_set(&caching_ctl->count, 1); + btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, + caching_thread, NULL, NULL); + + spin_lock(&cache->lock); + /* + * This should be a rare occasion, but this could happen I think in the + * case where one thread starts to load the space cache info, and then + * some other thread starts a transaction commit which tries to do an + * allocation while the other thread is still loading the space cache + * info. The previous loop should have kept us from choosing this block + * group, but if we've moved to the state where we will wait on caching + * block groups we need to first check if we're doing a fast load here, + * so we can wait for it to finish, otherwise we could end up allocating + * from a block group who's cache gets evicted for one reason or + * another. 
+ */ + while (cache->cached == BTRFS_CACHE_FAST) { + struct btrfs_caching_control *ctl; + + ctl = cache->caching_ctl; + refcount_inc(&ctl->count); + prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&cache->lock); + + schedule(); + + finish_wait(&ctl->wait, &wait); + btrfs_put_caching_control(ctl); + spin_lock(&cache->lock); + } + + if (cache->cached != BTRFS_CACHE_NO) { + spin_unlock(&cache->lock); + kfree(caching_ctl); + return 0; + } + WARN_ON(cache->caching_ctl); + cache->caching_ctl = caching_ctl; + cache->cached = BTRFS_CACHE_FAST; + spin_unlock(&cache->lock); + + if (btrfs_test_opt(fs_info, SPACE_CACHE)) { + mutex_lock(&caching_ctl->mutex); + ret = load_free_space_cache(cache); + + spin_lock(&cache->lock); + if (ret == 1) { + cache->caching_ctl = NULL; + cache->cached = BTRFS_CACHE_FINISHED; + cache->last_byte_to_unpin = (u64)-1; + caching_ctl->progress = (u64)-1; + } else { + if (load_cache_only) { + cache->caching_ctl = NULL; + cache->cached = BTRFS_CACHE_NO; + } else { + cache->cached = BTRFS_CACHE_STARTED; + cache->has_caching_ctl = 1; + } + } + spin_unlock(&cache->lock); +#ifdef CONFIG_BTRFS_DEBUG + if (ret == 1 && + btrfs_should_fragment_free_space(cache)) { + u64 bytes_used; + + spin_lock(&cache->space_info->lock); + spin_lock(&cache->lock); + bytes_used = cache->key.offset - + btrfs_block_group_used(&cache->item); + cache->space_info->bytes_used += bytes_used >> 1; + spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); + btrfs_fragment_free_space(cache); + } +#endif + mutex_unlock(&caching_ctl->mutex); + + wake_up(&caching_ctl->wait); + if (ret == 1) { + btrfs_put_caching_control(caching_ctl); + btrfs_free_excluded_extents(cache); + return 0; + } + } else { + /* + * We're either using the free space tree or no caching at all. + * Set cached to the appropriate value and wakeup any waiters. 
+ */ + spin_lock(&cache->lock); + if (load_cache_only) { + cache->caching_ctl = NULL; + cache->cached = BTRFS_CACHE_NO; + } else { + cache->cached = BTRFS_CACHE_STARTED; + cache->has_caching_ctl = 1; + } + spin_unlock(&cache->lock); + wake_up(&caching_ctl->wait); + } + + if (load_cache_only) { + btrfs_put_caching_control(caching_ctl); + return 0; + } + + down_write(&fs_info->commit_root_sem); + refcount_inc(&caching_ctl->count); + list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); + up_write(&fs_info->commit_root_sem); + + btrfs_get_block_group(cache); + + btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); + + return ret; +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index ef101fd52158..80b388ece277 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -174,6 +174,8 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, void btrfs_put_caching_control(struct btrfs_caching_control *ctl); struct btrfs_caching_control *btrfs_get_caching_control( struct btrfs_block_group_cache *cache); +u64 add_new_free_space(struct btrfs_block_group_cache *block_group, + u64 start, u64 end); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index fc5031e6fd23..17eb4c91f0e1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2618,8 +2618,6 @@ int btrfs_start_write_no_snapshotting(struct btrfs_root *root); void btrfs_end_write_no_snapshotting(struct btrfs_root *root); void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); -u64 add_new_free_space(struct btrfs_block_group_cache *block_group, - u64 start, u64 end); void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg); /* ctree.c */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 05f6464b3123..4b352325ff7f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -181,420 +181,6 @@ static int exclude_super_stripes(struct btrfs_block_group_cache *cache) return 0; } -struct btrfs_caching_control *btrfs_get_caching_control( - struct btrfs_block_group_cache *cache) -{ - struct btrfs_caching_control *ctl; - - spin_lock(&cache->lock); - if (!cache->caching_ctl) { - spin_unlock(&cache->lock); - return NULL; - } - - ctl = cache->caching_ctl; - refcount_inc(&ctl->count); - spin_unlock(&cache->lock); - return ctl; -} - -void btrfs_put_caching_control(struct btrfs_caching_control *ctl) -{ - if (refcount_dec_and_test(&ctl->count)) - kfree(ctl); -} - -#ifdef CONFIG_BTRFS_DEBUG -void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_fs_info *fs_info = block_group->fs_info; - u64 start = block_group->key.objectid; - u64 len = block_group->key.offset; - u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ? - fs_info->nodesize : fs_info->sectorsize; - u64 step = chunk << 1; - - while (len > chunk) { - btrfs_remove_free_space(block_group, start, chunk); - start += step; - if (len < step) - len = 0; - else - len -= step; - } -} -#endif - -/* - * This is only called by btrfs_cache_block_group, since we could have freed - * extents we need to check the pinned_extents for any extents that can't be - * used yet since their free space will be released as soon as the transaction - * commits. 
- */ -u64 add_new_free_space(struct btrfs_block_group_cache *block_group, - u64 start, u64 end) -{ - struct btrfs_fs_info *info = block_group->fs_info; - u64 extent_start, extent_end, size, total_added = 0; - int ret; - - while (start < end) { - ret = find_first_extent_bit(info->pinned_extents, start, - &extent_start, &extent_end, - EXTENT_DIRTY | EXTENT_UPTODATE, - NULL); - if (ret) - break; - - if (extent_start <= start) { - start = extent_end + 1; - } else if (extent_start > start && extent_start < end) { - size = extent_start - start; - total_added += size; - ret = btrfs_add_free_space(block_group, start, - size); - BUG_ON(ret); /* -ENOMEM or logic error */ - start = extent_end + 1; - } else { - break; - } - } - - if (start < end) { - size = end - start; - total_added += size; - ret = btrfs_add_free_space(block_group, start, size); - BUG_ON(ret); /* -ENOMEM or logic error */ - } - - return total_added; -} - -static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl) -{ - struct btrfs_block_group_cache *block_group = caching_ctl->block_group; - struct btrfs_fs_info *fs_info = block_group->fs_info; - struct btrfs_root *extent_root = fs_info->extent_root; - struct btrfs_path *path; - struct extent_buffer *leaf; - struct btrfs_key key; - u64 total_found = 0; - u64 last = 0; - u32 nritems; - int ret; - bool wakeup = true; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); - -#ifdef CONFIG_BTRFS_DEBUG - /* - * If we're fragmenting we don't want to make anybody think we can - * allocate from this block group until we've had a chance to fragment - * the free space. - */ - if (btrfs_should_fragment_free_space(block_group)) - wakeup = false; -#endif - /* - * We don't want to deadlock with somebody trying to allocate a new - * extent for the extent root while also trying to search the extent - * root to add free space. 
So we skip locking and search the commit - * root, since its read-only - */ - path->skip_locking = 1; - path->search_commit_root = 1; - path->reada = READA_FORWARD; - - key.objectid = last; - key.offset = 0; - key.type = BTRFS_EXTENT_ITEM_KEY; - -next: - ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - - while (1) { - if (btrfs_fs_closing(fs_info) > 1) { - last = (u64)-1; - break; - } - - if (path->slots[0] < nritems) { - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - } else { - ret = btrfs_find_next_key(extent_root, path, &key, 0, 0); - if (ret) - break; - - if (need_resched() || - rwsem_is_contended(&fs_info->commit_root_sem)) { - if (wakeup) - caching_ctl->progress = last; - btrfs_release_path(path); - up_read(&fs_info->commit_root_sem); - mutex_unlock(&caching_ctl->mutex); - cond_resched(); - mutex_lock(&caching_ctl->mutex); - down_read(&fs_info->commit_root_sem); - goto next; - } - - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - continue; - } - - if (key.objectid < last) { - key.objectid = last; - key.offset = 0; - key.type = BTRFS_EXTENT_ITEM_KEY; - - if (wakeup) - caching_ctl->progress = last; - btrfs_release_path(path); - goto next; - } - - if (key.objectid < block_group->key.objectid) { - path->slots[0]++; - continue; - } - - if (key.objectid >= block_group->key.objectid + - block_group->key.offset) - break; - - if (key.type == BTRFS_EXTENT_ITEM_KEY || - key.type == BTRFS_METADATA_ITEM_KEY) { - total_found += add_new_free_space(block_group, last, - key.objectid); - if (key.type == BTRFS_METADATA_ITEM_KEY) - last = key.objectid + - fs_info->nodesize; - else - last = key.objectid + key.offset; - - if (total_found > CACHING_CTL_WAKE_UP) { - total_found = 0; - if (wakeup) - wake_up(&caching_ctl->wait); - } - } - path->slots[0]++; - } - ret = 0; - - total_found += add_new_free_space(block_group, last, - block_group->key.objectid + - block_group->key.offset); - caching_ctl->progress = (u64)-1; - -out: - btrfs_free_path(path); - return ret; -} - -static noinline void caching_thread(struct btrfs_work *work) -{ - struct btrfs_block_group_cache *block_group; - struct btrfs_fs_info *fs_info; - struct btrfs_caching_control *caching_ctl; - int ret; - - caching_ctl = container_of(work, struct btrfs_caching_control, work); - block_group = caching_ctl->block_group; - fs_info = block_group->fs_info; - - mutex_lock(&caching_ctl->mutex); - down_read(&fs_info->commit_root_sem); - - if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) - ret = load_free_space_tree(caching_ctl); - else - ret = load_extent_tree_free(caching_ctl); - - spin_lock(&block_group->lock); - block_group->caching_ctl = NULL; - block_group->cached = ret ? 
BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED; - spin_unlock(&block_group->lock); - -#ifdef CONFIG_BTRFS_DEBUG - if (btrfs_should_fragment_free_space(block_group)) { - u64 bytes_used; - - spin_lock(&block_group->space_info->lock); - spin_lock(&block_group->lock); - bytes_used = block_group->key.offset - - btrfs_block_group_used(&block_group->item); - block_group->space_info->bytes_used += bytes_used >> 1; - spin_unlock(&block_group->lock); - spin_unlock(&block_group->space_info->lock); - btrfs_fragment_free_space(block_group); - } -#endif - - caching_ctl->progress = (u64)-1; - - up_read(&fs_info->commit_root_sem); - btrfs_free_excluded_extents(block_group); - mutex_unlock(&caching_ctl->mutex); - - wake_up(&caching_ctl->wait); - - btrfs_put_caching_control(caching_ctl); - btrfs_put_block_group(block_group); -} - -int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, - int load_cache_only) -{ - DEFINE_WAIT(wait); - struct btrfs_fs_info *fs_info = cache->fs_info; - struct btrfs_caching_control *caching_ctl; - int ret = 0; - - caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); - if (!caching_ctl) - return -ENOMEM; - - INIT_LIST_HEAD(&caching_ctl->list); - mutex_init(&caching_ctl->mutex); - init_waitqueue_head(&caching_ctl->wait); - caching_ctl->block_group = cache; - caching_ctl->progress = cache->key.objectid; - refcount_set(&caching_ctl->count, 1); - btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, - caching_thread, NULL, NULL); - - spin_lock(&cache->lock); - /* - * This should be a rare occasion, but this could happen I think in the - * case where one thread starts to load the space cache info, and then - * some other thread starts a transaction commit which tries to do an - * allocation while the other thread is still loading the space cache - * info. The previous loop should have kept us from choosing this block - * group, but if we've moved to the state where we will wait on caching - * block groups we need to first check if we're doing a fast load here, - * so we can wait for it to finish, otherwise we could end up allocating - * from a block group who's cache gets evicted for one reason or - * another. 
- */ - while (cache->cached == BTRFS_CACHE_FAST) { - struct btrfs_caching_control *ctl; - - ctl = cache->caching_ctl; - refcount_inc(&ctl->count); - prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock(&cache->lock); - - schedule(); - - finish_wait(&ctl->wait, &wait); - btrfs_put_caching_control(ctl); - spin_lock(&cache->lock); - } - - if (cache->cached != BTRFS_CACHE_NO) { - spin_unlock(&cache->lock); - kfree(caching_ctl); - return 0; - } - WARN_ON(cache->caching_ctl); - cache->caching_ctl = caching_ctl; - cache->cached = BTRFS_CACHE_FAST; - spin_unlock(&cache->lock); - - if (btrfs_test_opt(fs_info, SPACE_CACHE)) { - mutex_lock(&caching_ctl->mutex); - ret = load_free_space_cache(cache); - - spin_lock(&cache->lock); - if (ret == 1) { - cache->caching_ctl = NULL; - cache->cached = BTRFS_CACHE_FINISHED; - cache->last_byte_to_unpin = (u64)-1; - caching_ctl->progress = (u64)-1; - } else { - if (load_cache_only) { - cache->caching_ctl = NULL; - cache->cached = BTRFS_CACHE_NO; - } else { - cache->cached = BTRFS_CACHE_STARTED; - cache->has_caching_ctl = 1; - } - } - spin_unlock(&cache->lock); -#ifdef CONFIG_BTRFS_DEBUG - if (ret == 1 && - btrfs_should_fragment_free_space(cache)) { - u64 bytes_used; - - spin_lock(&cache->space_info->lock); - spin_lock(&cache->lock); - bytes_used = cache->key.offset - - btrfs_block_group_used(&cache->item); - cache->space_info->bytes_used += bytes_used >> 1; - spin_unlock(&cache->lock); - spin_unlock(&cache->space_info->lock); - btrfs_fragment_free_space(cache); - } -#endif - mutex_unlock(&caching_ctl->mutex); - - wake_up(&caching_ctl->wait); - if (ret == 1) { - btrfs_put_caching_control(caching_ctl); - btrfs_free_excluded_extents(cache); - return 0; - } - } else { - /* - * We're either using the free space tree or no caching at all. - * Set cached to the appropriate value and wakeup any waiters. - */ - spin_lock(&cache->lock); - if (load_cache_only) { - cache->caching_ctl = NULL; - cache->cached = BTRFS_CACHE_NO; - } else { - cache->cached = BTRFS_CACHE_STARTED; - cache->has_caching_ctl = 1; - } - spin_unlock(&cache->lock); - wake_up(&caching_ctl->wait); - } - - if (load_cache_only) { - btrfs_put_caching_control(caching_ctl); - return 0; - } - - down_write(&fs_info->commit_root_sem); - refcount_inc(&caching_ctl->count); - list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups); - up_write(&fs_info->commit_root_sem); - - btrfs_get_block_group(cache); - - btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); - - return ret; -} - - static u64 generic_ref_to_space_flags(struct btrfs_ref *ref) { if (ref->type == BTRFS_REF_METADATA) { @@ -4917,51 +4503,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) return ret; } -/* - * when we wait for progress in the block group caching, its because - * our allocation attempt failed at least once. So, we must sleep - * and let some progress happen before we try again. - * - * This function will sleep at least once waiting for new free space to - * show up, and then it will check the block group free space numbers - * for our min num_bytes. Another option is to have it go ahead - * and look in the rbtree for a free extent of a given size, but this - * is a good start. - * - * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using - * any of the information in this block group. 
- */ -void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, - u64 num_bytes) -{ - struct btrfs_caching_control *caching_ctl; - - caching_ctl = btrfs_get_caching_control(cache); - if (!caching_ctl) - return; - - wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) || - (cache->free_space_ctl->free_space >= num_bytes)); - - btrfs_put_caching_control(caching_ctl); -} - -int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) -{ - struct btrfs_caching_control *caching_ctl; - int ret = 0; - - caching_ctl = btrfs_get_caching_control(cache); - if (!caching_ctl) - return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0; - - wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache)); - if (cache->cached == BTRFS_CACHE_ERROR) - ret = -EIO; - btrfs_put_caching_control(caching_ctl); - return ret; -} - enum btrfs_loop_type { LOOP_CACHING_NOWAIT, LOOP_CACHING_WAIT, From 3b2a78f21d5c53ff34b8e03cba4f904c91d4b3a2 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:54 -0400 Subject: [PATCH 065/138] btrfs: temporarily export inc_block_group_ro This is used in a few logical parts of the block group code, temporarily export it so we can move things in pieces. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 2 ++ fs/btrfs/extent-tree.c | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 80b388ece277..143baaa54684 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -185,4 +185,6 @@ static inline int btrfs_block_group_cache_done( cache->cached == BTRFS_CACHE_ERROR; } +int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force); + #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4b352325ff7f..08bd67169590 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6697,7 +6697,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) * data in this block group. That check should be done by relocation routine, * not this function. 
*/ -static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) +int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -6807,14 +6807,14 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) goto out; } - ret = inc_block_group_ro(cache, 0); + ret = __btrfs_inc_block_group_ro(cache, 0); if (!ret) goto out; alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags); ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); if (ret < 0) goto out; - ret = inc_block_group_ro(cache, 0); + ret = __btrfs_inc_block_group_ro(cache, 0); out: if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { alloc_flags = update_block_group_flags(fs_info, cache->flags); @@ -7347,7 +7347,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) set_avail_alloc_bits(info, cache->flags); if (btrfs_chunk_readonly(info, cache->key.objectid)) { - inc_block_group_ro(cache, 1); + __btrfs_inc_block_group_ro(cache, 1); } else if (btrfs_block_group_used(&cache->item) == 0) { ASSERT(list_empty(&cache->bg_list)); btrfs_mark_bg_unused(cache); @@ -7368,11 +7368,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) list_for_each_entry(cache, &space_info->block_groups[BTRFS_RAID_RAID0], list) - inc_block_group_ro(cache, 1); + __btrfs_inc_block_group_ro(cache, 1); list_for_each_entry(cache, &space_info->block_groups[BTRFS_RAID_SINGLE], list) - inc_block_group_ro(cache, 1); + __btrfs_inc_block_group_ro(cache, 1); } btrfs_init_global_block_rsv(info); @@ -7911,7 +7911,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) spin_unlock(&block_group->lock); /* We don't want to force the issue, only flip if it's ok. */ - ret = inc_block_group_ro(block_group, 0); + ret = __btrfs_inc_block_group_ro(block_group, 0); up_write(&space_info->groups_sem); if (ret < 0) { ret = 0; From e3e0520b32bc3dbc64110536d171bfb334ac7a2a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:55 -0400 Subject: [PATCH 066/138] btrfs: migrate the block group removal code This is the removal code and the unused bgs code. 
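For orientation, the moved code consists of two pieces. The larger one, btrfs_remove_block_group(), runs through the following broad steps; this is an informal outline of the function shown below, not a replacement for it:

/*
 * btrfs_remove_block_group(), in outline:
 *  1. return the group's space to the data and metadata allocation clusters
 *  2. drop the free space cache inode and delete its free-space item
 *  3. unlink the group from the block group rbtree and its space_info list,
 *     dropping the per-RAID-type sysfs kobject if that list became empty
 *  4. wait for and tear down any in-flight caching control
 *  5. subtract the group's bytes from the space_info counters
 *  6. delete the block group item and, unless a pending trim still needs it,
 *     remove the chunk's extent map
 *
 * btrfs_delete_unused_bgs() walks fs_info->unused_bgs and feeds empty,
 * unreserved groups through this path (via btrfs_remove_chunk) after
 * flipping them read-only.
 */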
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ refresh, move clear_incompat_bg_bits ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 540 +++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 7 + fs/btrfs/ctree.h | 7 - fs/btrfs/extent-tree.c | 537 ---------------------------------------- 4 files changed, 547 insertions(+), 544 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 643a2f16603b..a27f814b86bd 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -6,6 +6,10 @@ #include "disk-io.h" #include "free-space-cache.h" #include "free-space-tree.h" +#include "disk-io.h" +#include "volumes.h" +#include "transaction.h" +#include "ref-verify.h" void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { @@ -660,3 +664,539 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, return ret; } + +static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) +{ + u64 extra_flags = chunk_to_extended(flags) & + BTRFS_EXTENDED_PROFILE_MASK; + + write_seqlock(&fs_info->profiles_lock); + if (flags & BTRFS_BLOCK_GROUP_DATA) + fs_info->avail_data_alloc_bits &= ~extra_flags; + if (flags & BTRFS_BLOCK_GROUP_METADATA) + fs_info->avail_metadata_alloc_bits &= ~extra_flags; + if (flags & BTRFS_BLOCK_GROUP_SYSTEM) + fs_info->avail_system_alloc_bits &= ~extra_flags; + write_sequnlock(&fs_info->profiles_lock); +} + +/* + * Clear incompat bits for the following feature(s): + * + * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group + * in the whole filesystem + */ +static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) +{ + if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) { + struct list_head *head = &fs_info->space_info; + struct btrfs_space_info *sinfo; + + list_for_each_entry_rcu(sinfo, head, list) { + bool found = false; + + down_read(&sinfo->groups_sem); + if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) + found = true; + if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) + found = true; + up_read(&sinfo->groups_sem); + + if (found) + return; + } + btrfs_clear_fs_incompat(fs_info, RAID56); + } +} + +int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + u64 group_start, struct extent_map *em) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_root *root = fs_info->extent_root; + struct btrfs_path *path; + struct btrfs_block_group_cache *block_group; + struct btrfs_free_cluster *cluster; + struct btrfs_root *tree_root = fs_info->tree_root; + struct btrfs_key key; + struct inode *inode; + struct kobject *kobj = NULL; + int ret; + int index; + int factor; + struct btrfs_caching_control *caching_ctl = NULL; + bool remove_em; + bool remove_rsv = false; + + block_group = btrfs_lookup_block_group(fs_info, group_start); + BUG_ON(!block_group); + BUG_ON(!block_group->ro); + + trace_btrfs_remove_block_group(block_group); + /* + * Free the reserved super bytes from this block group before + * remove it. 
+ */ + btrfs_free_excluded_extents(block_group); + btrfs_free_ref_tree_range(fs_info, block_group->key.objectid, + block_group->key.offset); + + memcpy(&key, &block_group->key, sizeof(key)); + index = btrfs_bg_flags_to_raid_index(block_group->flags); + factor = btrfs_bg_type_to_factor(block_group->flags); + + /* make sure this block group isn't part of an allocation cluster */ + cluster = &fs_info->data_alloc_cluster; + spin_lock(&cluster->refill_lock); + btrfs_return_cluster_to_free_space(block_group, cluster); + spin_unlock(&cluster->refill_lock); + + /* + * make sure this block group isn't part of a metadata + * allocation cluster + */ + cluster = &fs_info->meta_alloc_cluster; + spin_lock(&cluster->refill_lock); + btrfs_return_cluster_to_free_space(block_group, cluster); + spin_unlock(&cluster->refill_lock); + + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto out; + } + + /* + * get the inode first so any iput calls done for the io_list + * aren't the final iput (no unlinks allowed now) + */ + inode = lookup_free_space_inode(block_group, path); + + mutex_lock(&trans->transaction->cache_write_mutex); + /* + * Make sure our free space cache IO is done before removing the + * free space inode + */ + spin_lock(&trans->transaction->dirty_bgs_lock); + if (!list_empty(&block_group->io_list)) { + list_del_init(&block_group->io_list); + + WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); + + spin_unlock(&trans->transaction->dirty_bgs_lock); + btrfs_wait_cache_io(trans, block_group, path); + btrfs_put_block_group(block_group); + spin_lock(&trans->transaction->dirty_bgs_lock); + } + + if (!list_empty(&block_group->dirty_list)) { + list_del_init(&block_group->dirty_list); + remove_rsv = true; + btrfs_put_block_group(block_group); + } + spin_unlock(&trans->transaction->dirty_bgs_lock); + mutex_unlock(&trans->transaction->cache_write_mutex); + + if (!IS_ERR(inode)) { + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); + if (ret) { + btrfs_add_delayed_iput(inode); + goto out; + } + clear_nlink(inode); + /* One for the block groups ref */ + spin_lock(&block_group->lock); + if (block_group->iref) { + block_group->iref = 0; + block_group->inode = NULL; + spin_unlock(&block_group->lock); + iput(inode); + } else { + spin_unlock(&block_group->lock); + } + /* One for our lookup ref */ + btrfs_add_delayed_iput(inode); + } + + key.objectid = BTRFS_FREE_SPACE_OBJECTID; + key.offset = block_group->key.objectid; + key.type = 0; + + ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); + if (ret < 0) + goto out; + if (ret > 0) + btrfs_release_path(path); + if (ret == 0) { + ret = btrfs_del_item(trans, tree_root, path); + if (ret) + goto out; + btrfs_release_path(path); + } + + spin_lock(&fs_info->block_group_cache_lock); + rb_erase(&block_group->cache_node, + &fs_info->block_group_cache_tree); + RB_CLEAR_NODE(&block_group->cache_node); + + if (fs_info->first_logical_byte == block_group->key.objectid) + fs_info->first_logical_byte = (u64)-1; + spin_unlock(&fs_info->block_group_cache_lock); + + down_write(&block_group->space_info->groups_sem); + /* + * we must use list_del_init so people can check to see if they + * are still on the list after taking the semaphore + */ + list_del_init(&block_group->list); + if (list_empty(&block_group->space_info->block_groups[index])) { + kobj = block_group->space_info->block_group_kobjs[index]; + block_group->space_info->block_group_kobjs[index] = NULL; + clear_avail_alloc_bits(fs_info, block_group->flags); + } + 
up_write(&block_group->space_info->groups_sem); + clear_incompat_bg_bits(fs_info, block_group->flags); + if (kobj) { + kobject_del(kobj); + kobject_put(kobj); + } + + if (block_group->has_caching_ctl) + caching_ctl = btrfs_get_caching_control(block_group); + if (block_group->cached == BTRFS_CACHE_STARTED) + btrfs_wait_block_group_cache_done(block_group); + if (block_group->has_caching_ctl) { + down_write(&fs_info->commit_root_sem); + if (!caching_ctl) { + struct btrfs_caching_control *ctl; + + list_for_each_entry(ctl, + &fs_info->caching_block_groups, list) + if (ctl->block_group == block_group) { + caching_ctl = ctl; + refcount_inc(&caching_ctl->count); + break; + } + } + if (caching_ctl) + list_del_init(&caching_ctl->list); + up_write(&fs_info->commit_root_sem); + if (caching_ctl) { + /* Once for the caching bgs list and once for us. */ + btrfs_put_caching_control(caching_ctl); + btrfs_put_caching_control(caching_ctl); + } + } + + spin_lock(&trans->transaction->dirty_bgs_lock); + WARN_ON(!list_empty(&block_group->dirty_list)); + WARN_ON(!list_empty(&block_group->io_list)); + spin_unlock(&trans->transaction->dirty_bgs_lock); + + btrfs_remove_free_space_cache(block_group); + + spin_lock(&block_group->space_info->lock); + list_del_init(&block_group->ro_list); + + if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { + WARN_ON(block_group->space_info->total_bytes + < block_group->key.offset); + WARN_ON(block_group->space_info->bytes_readonly + < block_group->key.offset); + WARN_ON(block_group->space_info->disk_total + < block_group->key.offset * factor); + } + block_group->space_info->total_bytes -= block_group->key.offset; + block_group->space_info->bytes_readonly -= block_group->key.offset; + block_group->space_info->disk_total -= block_group->key.offset * factor; + + spin_unlock(&block_group->space_info->lock); + + memcpy(&key, &block_group->key, sizeof(key)); + + mutex_lock(&fs_info->chunk_mutex); + spin_lock(&block_group->lock); + block_group->removed = 1; + /* + * At this point trimming can't start on this block group, because we + * removed the block group from the tree fs_info->block_group_cache_tree + * so no one can't find it anymore and even if someone already got this + * block group before we removed it from the rbtree, they have already + * incremented block_group->trimming - if they didn't, they won't find + * any free space entries because we already removed them all when we + * called btrfs_remove_free_space_cache(). + * + * And we must not remove the extent map from the fs_info->mapping_tree + * to prevent the same logical address range and physical device space + * ranges from being reused for a new block group. This is because our + * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is + * completely transactionless, so while it is trimming a range the + * currently running transaction might finish and a new one start, + * allowing for new block groups to be created that can reuse the same + * physical device locations unless we take this special care. + * + * There may also be an implicit trim operation if the file system + * is mounted with -odiscard. The same protections must remain + * in place until the extents have been discarded completely when + * the transaction commit has completed. 
+ */ + remove_em = (atomic_read(&block_group->trimming) == 0); + spin_unlock(&block_group->lock); + + mutex_unlock(&fs_info->chunk_mutex); + + ret = remove_block_group_free_space(trans, block_group); + if (ret) + goto out; + + btrfs_put_block_group(block_group); + btrfs_put_block_group(block_group); + + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0) + ret = -EIO; + if (ret < 0) + goto out; + + ret = btrfs_del_item(trans, root, path); + if (ret) + goto out; + + if (remove_em) { + struct extent_map_tree *em_tree; + + em_tree = &fs_info->mapping_tree; + write_lock(&em_tree->lock); + remove_extent_mapping(em_tree, em); + write_unlock(&em_tree->lock); + /* once for the tree */ + free_extent_map(em); + } +out: + if (remove_rsv) + btrfs_delayed_refs_rsv_release(fs_info, 1); + btrfs_free_path(path); + return ret; +} + +struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( + struct btrfs_fs_info *fs_info, const u64 chunk_offset) +{ + struct extent_map_tree *em_tree = &fs_info->mapping_tree; + struct extent_map *em; + struct map_lookup *map; + unsigned int num_items; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, chunk_offset, 1); + read_unlock(&em_tree->lock); + ASSERT(em && em->start == chunk_offset); + + /* + * We need to reserve 3 + N units from the metadata space info in order + * to remove a block group (done at btrfs_remove_chunk() and at + * btrfs_remove_block_group()), which are used for: + * + * 1 unit for adding the free space inode's orphan (located in the tree + * of tree roots). + * 1 unit for deleting the block group item (located in the extent + * tree). + * 1 unit for deleting the free space item (located in tree of tree + * roots). + * N units for deleting N device extent items corresponding to each + * stripe (located in the device tree). + * + * In order to remove a block group we also need to reserve units in the + * system space info in order to update the chunk tree (update one or + * more device items and remove one chunk item), but this is done at + * btrfs_remove_chunk() through a call to check_system_chunk(). + */ + map = em->map_lookup; + num_items = 3 + map->num_stripes; + free_extent_map(em); + + return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, + num_items, 1); +} + +/* + * Process the unused_bgs list and remove any that don't have any allocated + * space inside of them. 
+ */ +void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) +{ + struct btrfs_block_group_cache *block_group; + struct btrfs_space_info *space_info; + struct btrfs_trans_handle *trans; + int ret = 0; + + if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) + return; + + spin_lock(&fs_info->unused_bgs_lock); + while (!list_empty(&fs_info->unused_bgs)) { + u64 start, end; + int trimming; + + block_group = list_first_entry(&fs_info->unused_bgs, + struct btrfs_block_group_cache, + bg_list); + list_del_init(&block_group->bg_list); + + space_info = block_group->space_info; + + if (ret || btrfs_mixed_space_info(space_info)) { + btrfs_put_block_group(block_group); + continue; + } + spin_unlock(&fs_info->unused_bgs_lock); + + mutex_lock(&fs_info->delete_unused_bgs_mutex); + + /* Don't want to race with allocators so take the groups_sem */ + down_write(&space_info->groups_sem); + spin_lock(&block_group->lock); + if (block_group->reserved || block_group->pinned || + btrfs_block_group_used(&block_group->item) || + block_group->ro || + list_is_singular(&block_group->list)) { + /* + * We want to bail if we made new allocations or have + * outstanding allocations in this block group. We do + * the ro check in case balance is currently acting on + * this block group. + */ + trace_btrfs_skip_unused_block_group(block_group); + spin_unlock(&block_group->lock); + up_write(&space_info->groups_sem); + goto next; + } + spin_unlock(&block_group->lock); + + /* We don't want to force the issue, only flip if it's ok. */ + ret = __btrfs_inc_block_group_ro(block_group, 0); + up_write(&space_info->groups_sem); + if (ret < 0) { + ret = 0; + goto next; + } + + /* + * Want to do this before we do anything else so we can recover + * properly if we fail to join the transaction. + */ + trans = btrfs_start_trans_remove_block_group(fs_info, + block_group->key.objectid); + if (IS_ERR(trans)) { + btrfs_dec_block_group_ro(block_group); + ret = PTR_ERR(trans); + goto next; + } + + /* + * We could have pending pinned extents for this block group, + * just delete them, we don't care about them anymore. + */ + start = block_group->key.objectid; + end = start + block_group->key.offset - 1; + /* + * Hold the unused_bg_unpin_mutex lock to avoid racing with + * btrfs_finish_extent_commit(). If we are at transaction N, + * another task might be running finish_extent_commit() for the + * previous transaction N - 1, and have seen a range belonging + * to the block group in freed_extents[] before we were able to + * clear the whole block group range from freed_extents[]. This + * means that task can lookup for the block group after we + * unpinned it from freed_extents[] and removed it, leading to + * a BUG_ON() at btrfs_unpin_extent_range(). 
+ */ + mutex_lock(&fs_info->unused_bg_unpin_mutex); + ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, + EXTENT_DIRTY); + if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); + btrfs_dec_block_group_ro(block_group); + goto end_trans; + } + ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, + EXTENT_DIRTY); + if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); + btrfs_dec_block_group_ro(block_group); + goto end_trans; + } + mutex_unlock(&fs_info->unused_bg_unpin_mutex); + + /* Reset pinned so btrfs_put_block_group doesn't complain */ + spin_lock(&space_info->lock); + spin_lock(&block_group->lock); + + btrfs_space_info_update_bytes_pinned(fs_info, space_info, + -block_group->pinned); + space_info->bytes_readonly += block_group->pinned; + percpu_counter_add_batch(&space_info->total_bytes_pinned, + -block_group->pinned, + BTRFS_TOTAL_BYTES_PINNED_BATCH); + block_group->pinned = 0; + + spin_unlock(&block_group->lock); + spin_unlock(&space_info->lock); + + /* DISCARD can flip during remount */ + trimming = btrfs_test_opt(fs_info, DISCARD); + + /* Implicit trim during transaction commit. */ + if (trimming) + btrfs_get_block_group_trimming(block_group); + + /* + * Btrfs_remove_chunk will abort the transaction if things go + * horribly wrong. + */ + ret = btrfs_remove_chunk(trans, block_group->key.objectid); + + if (ret) { + if (trimming) + btrfs_put_block_group_trimming(block_group); + goto end_trans; + } + + /* + * If we're not mounted with -odiscard, we can just forget + * about this block group. Otherwise we'll need to wait + * until transaction commit to do the actual discard. + */ + if (trimming) { + spin_lock(&fs_info->unused_bgs_lock); + /* + * A concurrent scrub might have added us to the list + * fs_info->unused_bgs, so use a list_move operation + * to add the block group to the deleted_bgs list. 
+ */ + list_move(&block_group->bg_list, + &trans->transaction->deleted_bgs); + spin_unlock(&fs_info->unused_bgs_lock); + btrfs_get_block_group(block_group); + } +end_trans: + btrfs_end_transaction(trans); +next: + mutex_unlock(&fs_info->delete_unused_bgs_mutex); + btrfs_put_block_group(block_group); + spin_lock(&fs_info->unused_bgs_lock); + } + spin_unlock(&fs_info->unused_bgs_lock); +} + +void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg) +{ + struct btrfs_fs_info *fs_info = bg->fs_info; + + spin_lock(&fs_info->unused_bgs_lock); + if (list_empty(&bg->bg_list)) { + btrfs_get_block_group(bg); + trace_btrfs_add_unused_block_group(bg); + list_add_tail(&bg->bg_list, &fs_info->unused_bgs); + } + spin_unlock(&fs_info->unused_bgs_lock); +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 143baaa54684..f1fe14ba2702 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -176,6 +176,13 @@ struct btrfs_caching_control *btrfs_get_caching_control( struct btrfs_block_group_cache *cache); u64 add_new_free_space(struct btrfs_block_group_cache *block_group, u64 start, u64 end); +struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( + struct btrfs_fs_info *fs_info, + const u64 chunk_offset); +int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + u64 group_start, struct extent_map *em); +void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); +void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 17eb4c91f0e1..aedee3f66764 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2532,12 +2532,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); -struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( - struct btrfs_fs_info *fs_info, - const u64 chunk_offset); -int btrfs_remove_block_group(struct btrfs_trans_handle *trans, - u64 group_start, struct extent_map *em); -void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); @@ -2618,7 +2612,6 @@ int btrfs_start_write_no_snapshotting(struct btrfs_root *root); void btrfs_end_write_no_snapshotting(struct btrfs_root *root); void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); -void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 08bd67169590..775d78a101b0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7501,530 +7501,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, return 0; } -static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) -{ - u64 extra_flags = chunk_to_extended(flags) & - BTRFS_EXTENDED_PROFILE_MASK; - - write_seqlock(&fs_info->profiles_lock); - if (flags & BTRFS_BLOCK_GROUP_DATA) - fs_info->avail_data_alloc_bits &= ~extra_flags; - if (flags & BTRFS_BLOCK_GROUP_METADATA) - fs_info->avail_metadata_alloc_bits &= ~extra_flags; - if (flags & 
BTRFS_BLOCK_GROUP_SYSTEM) - fs_info->avail_system_alloc_bits &= ~extra_flags; - write_sequnlock(&fs_info->profiles_lock); -} - -/* - * Clear incompat bits for the following feature(s): - * - * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group - * in the whole filesystem - */ -static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags) -{ - if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) { - struct list_head *head = &fs_info->space_info; - struct btrfs_space_info *sinfo; - - list_for_each_entry_rcu(sinfo, head, list) { - bool found = false; - - down_read(&sinfo->groups_sem); - if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5])) - found = true; - if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6])) - found = true; - up_read(&sinfo->groups_sem); - - if (found) - return; - } - btrfs_clear_fs_incompat(fs_info, RAID56); - } -} - -int btrfs_remove_block_group(struct btrfs_trans_handle *trans, - u64 group_start, struct extent_map *em) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_root *root = fs_info->extent_root; - struct btrfs_path *path; - struct btrfs_block_group_cache *block_group; - struct btrfs_free_cluster *cluster; - struct btrfs_root *tree_root = fs_info->tree_root; - struct btrfs_key key; - struct inode *inode; - struct kobject *kobj = NULL; - int ret; - int index; - int factor; - struct btrfs_caching_control *caching_ctl = NULL; - bool remove_em; - bool remove_rsv = false; - - block_group = btrfs_lookup_block_group(fs_info, group_start); - BUG_ON(!block_group); - BUG_ON(!block_group->ro); - - trace_btrfs_remove_block_group(block_group); - /* - * Free the reserved super bytes from this block group before - * remove it. - */ - btrfs_free_excluded_extents(block_group); - btrfs_free_ref_tree_range(fs_info, block_group->key.objectid, - block_group->key.offset); - - memcpy(&key, &block_group->key, sizeof(key)); - index = btrfs_bg_flags_to_raid_index(block_group->flags); - factor = btrfs_bg_type_to_factor(block_group->flags); - - /* make sure this block group isn't part of an allocation cluster */ - cluster = &fs_info->data_alloc_cluster; - spin_lock(&cluster->refill_lock); - btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&cluster->refill_lock); - - /* - * make sure this block group isn't part of a metadata - * allocation cluster - */ - cluster = &fs_info->meta_alloc_cluster; - spin_lock(&cluster->refill_lock); - btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&cluster->refill_lock); - - path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } - - /* - * get the inode first so any iput calls done for the io_list - * aren't the final iput (no unlinks allowed now) - */ - inode = lookup_free_space_inode(block_group, path); - - mutex_lock(&trans->transaction->cache_write_mutex); - /* - * Make sure our free space cache IO is done before removing the - * free space inode - */ - spin_lock(&trans->transaction->dirty_bgs_lock); - if (!list_empty(&block_group->io_list)) { - list_del_init(&block_group->io_list); - - WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode); - - spin_unlock(&trans->transaction->dirty_bgs_lock); - btrfs_wait_cache_io(trans, block_group, path); - btrfs_put_block_group(block_group); - spin_lock(&trans->transaction->dirty_bgs_lock); - } - - if (!list_empty(&block_group->dirty_list)) { - list_del_init(&block_group->dirty_list); - remove_rsv = true; - btrfs_put_block_group(block_group); - } - spin_unlock(&trans->transaction->dirty_bgs_lock); - 
mutex_unlock(&trans->transaction->cache_write_mutex); - - if (!IS_ERR(inode)) { - ret = btrfs_orphan_add(trans, BTRFS_I(inode)); - if (ret) { - btrfs_add_delayed_iput(inode); - goto out; - } - clear_nlink(inode); - /* One for the block groups ref */ - spin_lock(&block_group->lock); - if (block_group->iref) { - block_group->iref = 0; - block_group->inode = NULL; - spin_unlock(&block_group->lock); - iput(inode); - } else { - spin_unlock(&block_group->lock); - } - /* One for our lookup ref */ - btrfs_add_delayed_iput(inode); - } - - key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; - key.type = 0; - - ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); - if (ret < 0) - goto out; - if (ret > 0) - btrfs_release_path(path); - if (ret == 0) { - ret = btrfs_del_item(trans, tree_root, path); - if (ret) - goto out; - btrfs_release_path(path); - } - - spin_lock(&fs_info->block_group_cache_lock); - rb_erase(&block_group->cache_node, - &fs_info->block_group_cache_tree); - RB_CLEAR_NODE(&block_group->cache_node); - - if (fs_info->first_logical_byte == block_group->key.objectid) - fs_info->first_logical_byte = (u64)-1; - spin_unlock(&fs_info->block_group_cache_lock); - - down_write(&block_group->space_info->groups_sem); - /* - * we must use list_del_init so people can check to see if they - * are still on the list after taking the semaphore - */ - list_del_init(&block_group->list); - if (list_empty(&block_group->space_info->block_groups[index])) { - kobj = block_group->space_info->block_group_kobjs[index]; - block_group->space_info->block_group_kobjs[index] = NULL; - clear_avail_alloc_bits(fs_info, block_group->flags); - } - up_write(&block_group->space_info->groups_sem); - clear_incompat_bg_bits(fs_info, block_group->flags); - if (kobj) { - kobject_del(kobj); - kobject_put(kobj); - } - - if (block_group->has_caching_ctl) - caching_ctl = btrfs_get_caching_control(block_group); - if (block_group->cached == BTRFS_CACHE_STARTED) - btrfs_wait_block_group_cache_done(block_group); - if (block_group->has_caching_ctl) { - down_write(&fs_info->commit_root_sem); - if (!caching_ctl) { - struct btrfs_caching_control *ctl; - - list_for_each_entry(ctl, - &fs_info->caching_block_groups, list) - if (ctl->block_group == block_group) { - caching_ctl = ctl; - refcount_inc(&caching_ctl->count); - break; - } - } - if (caching_ctl) - list_del_init(&caching_ctl->list); - up_write(&fs_info->commit_root_sem); - if (caching_ctl) { - /* Once for the caching bgs list and once for us. 
*/ - btrfs_put_caching_control(caching_ctl); - btrfs_put_caching_control(caching_ctl); - } - } - - spin_lock(&trans->transaction->dirty_bgs_lock); - WARN_ON(!list_empty(&block_group->dirty_list)); - WARN_ON(!list_empty(&block_group->io_list)); - spin_unlock(&trans->transaction->dirty_bgs_lock); - - btrfs_remove_free_space_cache(block_group); - - spin_lock(&block_group->space_info->lock); - list_del_init(&block_group->ro_list); - - if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { - WARN_ON(block_group->space_info->total_bytes - < block_group->key.offset); - WARN_ON(block_group->space_info->bytes_readonly - < block_group->key.offset); - WARN_ON(block_group->space_info->disk_total - < block_group->key.offset * factor); - } - block_group->space_info->total_bytes -= block_group->key.offset; - block_group->space_info->bytes_readonly -= block_group->key.offset; - block_group->space_info->disk_total -= block_group->key.offset * factor; - - spin_unlock(&block_group->space_info->lock); - - memcpy(&key, &block_group->key, sizeof(key)); - - mutex_lock(&fs_info->chunk_mutex); - spin_lock(&block_group->lock); - block_group->removed = 1; - /* - * At this point trimming can't start on this block group, because we - * removed the block group from the tree fs_info->block_group_cache_tree - * so no one can't find it anymore and even if someone already got this - * block group before we removed it from the rbtree, they have already - * incremented block_group->trimming - if they didn't, they won't find - * any free space entries because we already removed them all when we - * called btrfs_remove_free_space_cache(). - * - * And we must not remove the extent map from the fs_info->mapping_tree - * to prevent the same logical address range and physical device space - * ranges from being reused for a new block group. This is because our - * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is - * completely transactionless, so while it is trimming a range the - * currently running transaction might finish and a new one start, - * allowing for new block groups to be created that can reuse the same - * physical device locations unless we take this special care. - * - * There may also be an implicit trim operation if the file system - * is mounted with -odiscard. The same protections must remain - * in place until the extents have been discarded completely when - * the transaction commit has completed. 
- */ - remove_em = (atomic_read(&block_group->trimming) == 0); - spin_unlock(&block_group->lock); - - mutex_unlock(&fs_info->chunk_mutex); - - ret = remove_block_group_free_space(trans, block_group); - if (ret) - goto out; - - btrfs_put_block_group(block_group); - btrfs_put_block_group(block_group); - - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret > 0) - ret = -EIO; - if (ret < 0) - goto out; - - ret = btrfs_del_item(trans, root, path); - if (ret) - goto out; - - if (remove_em) { - struct extent_map_tree *em_tree; - - em_tree = &fs_info->mapping_tree; - write_lock(&em_tree->lock); - remove_extent_mapping(em_tree, em); - write_unlock(&em_tree->lock); - /* once for the tree */ - free_extent_map(em); - } -out: - if (remove_rsv) - btrfs_delayed_refs_rsv_release(fs_info, 1); - btrfs_free_path(path); - return ret; -} - -struct btrfs_trans_handle * -btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info, - const u64 chunk_offset) -{ - struct extent_map_tree *em_tree = &fs_info->mapping_tree; - struct extent_map *em; - struct map_lookup *map; - unsigned int num_items; - - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, chunk_offset, 1); - read_unlock(&em_tree->lock); - ASSERT(em && em->start == chunk_offset); - - /* - * We need to reserve 3 + N units from the metadata space info in order - * to remove a block group (done at btrfs_remove_chunk() and at - * btrfs_remove_block_group()), which are used for: - * - * 1 unit for adding the free space inode's orphan (located in the tree - * of tree roots). - * 1 unit for deleting the block group item (located in the extent - * tree). - * 1 unit for deleting the free space item (located in tree of tree - * roots). - * N units for deleting N device extent items corresponding to each - * stripe (located in the device tree). - * - * In order to remove a block group we also need to reserve units in the - * system space info in order to update the chunk tree (update one or - * more device items and remove one chunk item), but this is done at - * btrfs_remove_chunk() through a call to check_system_chunk(). - */ - map = em->map_lookup; - num_items = 3 + map->num_stripes; - free_extent_map(em); - - return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root, - num_items, 1); -} - -/* - * Process the unused_bgs list and remove any that don't have any allocated - * space inside of them. 
- */ -void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) -{ - struct btrfs_block_group_cache *block_group; - struct btrfs_space_info *space_info; - struct btrfs_trans_handle *trans; - int ret = 0; - - if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) - return; - - spin_lock(&fs_info->unused_bgs_lock); - while (!list_empty(&fs_info->unused_bgs)) { - u64 start, end; - int trimming; - - block_group = list_first_entry(&fs_info->unused_bgs, - struct btrfs_block_group_cache, - bg_list); - list_del_init(&block_group->bg_list); - - space_info = block_group->space_info; - - if (ret || btrfs_mixed_space_info(space_info)) { - btrfs_put_block_group(block_group); - continue; - } - spin_unlock(&fs_info->unused_bgs_lock); - - mutex_lock(&fs_info->delete_unused_bgs_mutex); - - /* Don't want to race with allocators so take the groups_sem */ - down_write(&space_info->groups_sem); - spin_lock(&block_group->lock); - if (block_group->reserved || block_group->pinned || - btrfs_block_group_used(&block_group->item) || - block_group->ro || - list_is_singular(&block_group->list)) { - /* - * We want to bail if we made new allocations or have - * outstanding allocations in this block group. We do - * the ro check in case balance is currently acting on - * this block group. - */ - trace_btrfs_skip_unused_block_group(block_group); - spin_unlock(&block_group->lock); - up_write(&space_info->groups_sem); - goto next; - } - spin_unlock(&block_group->lock); - - /* We don't want to force the issue, only flip if it's ok. */ - ret = __btrfs_inc_block_group_ro(block_group, 0); - up_write(&space_info->groups_sem); - if (ret < 0) { - ret = 0; - goto next; - } - - /* - * Want to do this before we do anything else so we can recover - * properly if we fail to join the transaction. - */ - trans = btrfs_start_trans_remove_block_group(fs_info, - block_group->key.objectid); - if (IS_ERR(trans)) { - btrfs_dec_block_group_ro(block_group); - ret = PTR_ERR(trans); - goto next; - } - - /* - * We could have pending pinned extents for this block group, - * just delete them, we don't care about them anymore. - */ - start = block_group->key.objectid; - end = start + block_group->key.offset - 1; - /* - * Hold the unused_bg_unpin_mutex lock to avoid racing with - * btrfs_finish_extent_commit(). If we are at transaction N, - * another task might be running finish_extent_commit() for the - * previous transaction N - 1, and have seen a range belonging - * to the block group in freed_extents[] before we were able to - * clear the whole block group range from freed_extents[]. This - * means that task can lookup for the block group after we - * unpinned it from freed_extents[] and removed it, leading to - * a BUG_ON() at btrfs_unpin_extent_range(). 
- */ - mutex_lock(&fs_info->unused_bg_unpin_mutex); - ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, - EXTENT_DIRTY); - if (ret) { - mutex_unlock(&fs_info->unused_bg_unpin_mutex); - btrfs_dec_block_group_ro(block_group); - goto end_trans; - } - ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, - EXTENT_DIRTY); - if (ret) { - mutex_unlock(&fs_info->unused_bg_unpin_mutex); - btrfs_dec_block_group_ro(block_group); - goto end_trans; - } - mutex_unlock(&fs_info->unused_bg_unpin_mutex); - - /* Reset pinned so btrfs_put_block_group doesn't complain */ - spin_lock(&space_info->lock); - spin_lock(&block_group->lock); - - btrfs_space_info_update_bytes_pinned(fs_info, space_info, - -block_group->pinned); - space_info->bytes_readonly += block_group->pinned; - percpu_counter_add_batch(&space_info->total_bytes_pinned, - -block_group->pinned, - BTRFS_TOTAL_BYTES_PINNED_BATCH); - block_group->pinned = 0; - - spin_unlock(&block_group->lock); - spin_unlock(&space_info->lock); - - /* DISCARD can flip during remount */ - trimming = btrfs_test_opt(fs_info, DISCARD); - - /* Implicit trim during transaction commit. */ - if (trimming) - btrfs_get_block_group_trimming(block_group); - - /* - * Btrfs_remove_chunk will abort the transaction if things go - * horribly wrong. - */ - ret = btrfs_remove_chunk(trans, block_group->key.objectid); - - if (ret) { - if (trimming) - btrfs_put_block_group_trimming(block_group); - goto end_trans; - } - - /* - * If we're not mounted with -odiscard, we can just forget - * about this block group. Otherwise we'll need to wait - * until transaction commit to do the actual discard. - */ - if (trimming) { - spin_lock(&fs_info->unused_bgs_lock); - /* - * A concurrent scrub might have added us to the list - * fs_info->unused_bgs, so use a list_move operation - * to add the block group to the deleted_bgs list. - */ - list_move(&block_group->bg_list, - &trans->transaction->deleted_bgs); - spin_unlock(&fs_info->unused_bgs_lock); - btrfs_get_block_group(block_group); - } -end_trans: - btrfs_end_transaction(trans); -next: - mutex_unlock(&fs_info->delete_unused_bgs_mutex); - btrfs_put_block_group(block_group); - spin_lock(&fs_info->unused_bgs_lock); - } - spin_unlock(&fs_info->unused_bgs_lock); -} - int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end) { @@ -8272,16 +7748,3 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root) !atomic_read(&root->will_be_snapshotted)); } } - -void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg) -{ - struct btrfs_fs_info *fs_info = bg->fs_info; - - spin_lock(&fs_info->unused_bgs_lock); - if (list_empty(&bg->bg_list)) { - btrfs_get_block_group(bg); - trace_btrfs_add_unused_block_group(bg); - list_add_tail(&bg->bg_list, &fs_info->unused_bgs); - } - spin_unlock(&fs_info->unused_bgs_lock); -} From 4358d9635a16a7bc92fecf095fd76d5a3d776188 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:57 -0400 Subject: [PATCH 067/138] btrfs: migrate the block group read/creation code All of the prep work has been done so we can now cleanly move this chunk over. 
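A note for reviewers of the moved btrfs_read_block_groups(): for each block group item found on disk it decides up front whether any caching work will ever be needed, per the "either we are full ... or we are empty" comment in the function. The snippet below is a minimal, standalone userspace sketch of just that decision, added here only as a reading aid; the enum values, the initial_cache_state() helper and the main() driver are made-up illustrations, while the real code sets cache->cached = BTRFS_CACHE_FINISHED and calls add_new_free_space() for the empty case.

	/* Hypothetical userspace model of the mount-time caching decision, not kernel code. */
	#include <stdio.h>
	#include <stdint.h>

	enum cache_state { CACHE_FINISHED, CACHE_LAZY };

	static enum cache_state initial_cache_state(uint64_t size, uint64_t used)
	{
		if (used == size)	/* full: there is no free space to find */
			return CACHE_FINISHED;
		if (used == 0)		/* empty: all of it is free space, add it now */
			return CACHE_FINISHED;
		return CACHE_LAZY;	/* partially used: cache lazily on first allocation */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       initial_cache_state(1 << 30, 1 << 30),	/* full  -> finished */
		       initial_cache_state(1 << 30, 0),		/* empty -> finished */
		       initial_cache_state(1 << 30, 4096));	/* mixed -> lazy */
		return 0;
	}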
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ refresh, add btrfs_get_alloc_profile export, comment updates ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 591 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 4 + fs/btrfs/ctree.h | 6 +- fs/btrfs/extent-tree.c | 600 +---------------------------------------- 4 files changed, 601 insertions(+), 600 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index a27f814b86bd..6a70301a587a 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -10,6 +10,8 @@ #include "volumes.h" #include "transaction.h" #include "ref-verify.h" +#include "sysfs.h" +#include "tree-log.h" void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { @@ -36,6 +38,45 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) } } +/* + * This adds the block group to the fs_info rb tree for the block group cache + */ +static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, + struct btrfs_block_group_cache *block_group) +{ + struct rb_node **p; + struct rb_node *parent = NULL; + struct btrfs_block_group_cache *cache; + + spin_lock(&info->block_group_cache_lock); + p = &info->block_group_cache_tree.rb_node; + + while (*p) { + parent = *p; + cache = rb_entry(parent, struct btrfs_block_group_cache, + cache_node); + if (block_group->key.objectid < cache->key.objectid) { + p = &(*p)->rb_left; + } else if (block_group->key.objectid > cache->key.objectid) { + p = &(*p)->rb_right; + } else { + spin_unlock(&info->block_group_cache_lock); + return -EEXIST; + } + } + + rb_link_node(&block_group->cache_node, parent, p); + rb_insert_color(&block_group->cache_node, + &info->block_group_cache_tree); + + if (info->first_logical_byte > block_group->key.objectid) + info->first_logical_byte = block_group->key.objectid; + + spin_unlock(&info->block_group_cache_lock); + + return 0; +} + /* * This will return the block group at or after bytenr if contains is 0, else * it will return the block group that contains the bytenr @@ -1200,3 +1241,553 @@ void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg) } spin_unlock(&fs_info->unused_bgs_lock); } + +static int find_first_block_group(struct btrfs_fs_info *fs_info, + struct btrfs_path *path, + struct btrfs_key *key) +{ + struct btrfs_root *root = fs_info->extent_root; + int ret = 0; + struct btrfs_key found_key; + struct extent_buffer *leaf; + struct btrfs_block_group_item bg; + u64 flags; + int slot; + + ret = btrfs_search_slot(NULL, root, key, path, 0, 0); + if (ret < 0) + goto out; + + while (1) { + slot = path->slots[0]; + leaf = path->nodes[0]; + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + break; + } + btrfs_item_key_to_cpu(leaf, &found_key, slot); + + if (found_key.objectid >= key->objectid && + found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { + struct extent_map_tree *em_tree; + struct extent_map *em; + + em_tree = &root->fs_info->mapping_tree; + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, found_key.objectid, + found_key.offset); + read_unlock(&em_tree->lock); + if (!em) { + btrfs_err(fs_info, + "logical %llu len %llu found bg but no related chunk", + found_key.objectid, found_key.offset); + ret = -ENOENT; + } else if (em->start != found_key.objectid || + em->len != found_key.offset) { + btrfs_err(fs_info, + "block group %llu len %llu mismatch with chunk %llu len %llu", + found_key.objectid, found_key.offset, + em->start, em->len); 
+ ret = -EUCLEAN; + } else { + read_extent_buffer(leaf, &bg, + btrfs_item_ptr_offset(leaf, slot), + sizeof(bg)); + flags = btrfs_block_group_flags(&bg) & + BTRFS_BLOCK_GROUP_TYPE_MASK; + + if (flags != (em->map_lookup->type & + BTRFS_BLOCK_GROUP_TYPE_MASK)) { + btrfs_err(fs_info, +"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", + found_key.objectid, + found_key.offset, flags, + (BTRFS_BLOCK_GROUP_TYPE_MASK & + em->map_lookup->type)); + ret = -EUCLEAN; + } else { + ret = 0; + } + } + free_extent_map(em); + goto out; + } + path->slots[0]++; + } +out: + return ret; +} + +static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) +{ + u64 extra_flags = chunk_to_extended(flags) & + BTRFS_EXTENDED_PROFILE_MASK; + + write_seqlock(&fs_info->profiles_lock); + if (flags & BTRFS_BLOCK_GROUP_DATA) + fs_info->avail_data_alloc_bits |= extra_flags; + if (flags & BTRFS_BLOCK_GROUP_METADATA) + fs_info->avail_metadata_alloc_bits |= extra_flags; + if (flags & BTRFS_BLOCK_GROUP_SYSTEM) + fs_info->avail_system_alloc_bits |= extra_flags; + write_sequnlock(&fs_info->profiles_lock); +} + +static int exclude_super_stripes(struct btrfs_block_group_cache *cache) +{ + struct btrfs_fs_info *fs_info = cache->fs_info; + u64 bytenr; + u64 *logical; + int stripe_len; + int i, nr, ret; + + if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { + stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; + cache->bytes_super += stripe_len; + ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid, + stripe_len); + if (ret) + return ret; + } + + for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { + bytenr = btrfs_sb_offset(i); + ret = btrfs_rmap_block(fs_info, cache->key.objectid, + bytenr, &logical, &nr, &stripe_len); + if (ret) + return ret; + + while (nr--) { + u64 start, len; + + if (logical[nr] > cache->key.objectid + + cache->key.offset) + continue; + + if (logical[nr] + stripe_len <= cache->key.objectid) + continue; + + start = logical[nr]; + if (start < cache->key.objectid) { + start = cache->key.objectid; + len = (logical[nr] + stripe_len) - start; + } else { + len = min_t(u64, stripe_len, + cache->key.objectid + + cache->key.offset - start); + } + + cache->bytes_super += len; + ret = btrfs_add_excluded_extent(fs_info, start, len); + if (ret) { + kfree(logical); + return ret; + } + } + + kfree(logical); + } + return 0; +} + +static void link_block_group(struct btrfs_block_group_cache *cache) +{ + struct btrfs_space_info *space_info = cache->space_info; + int index = btrfs_bg_flags_to_raid_index(cache->flags); + bool first = false; + + down_write(&space_info->groups_sem); + if (list_empty(&space_info->block_groups[index])) + first = true; + list_add_tail(&cache->list, &space_info->block_groups[index]); + up_write(&space_info->groups_sem); + + if (first) + btrfs_sysfs_add_block_group_type(cache); +} + +static struct btrfs_block_group_cache *btrfs_create_block_group_cache( + struct btrfs_fs_info *fs_info, u64 start, u64 size) +{ + struct btrfs_block_group_cache *cache; + + cache = kzalloc(sizeof(*cache), GFP_NOFS); + if (!cache) + return NULL; + + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + return NULL; + } + + cache->key.objectid = start; + cache->key.offset = size; + cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + + cache->fs_info = fs_info; + cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); + set_free_space_tree_thresholds(cache); + + atomic_set(&cache->count, 1); + 
spin_lock_init(&cache->lock); + init_rwsem(&cache->data_rwsem); + INIT_LIST_HEAD(&cache->list); + INIT_LIST_HEAD(&cache->cluster_list); + INIT_LIST_HEAD(&cache->bg_list); + INIT_LIST_HEAD(&cache->ro_list); + INIT_LIST_HEAD(&cache->dirty_list); + INIT_LIST_HEAD(&cache->io_list); + btrfs_init_free_space_ctl(cache); + atomic_set(&cache->trimming, 0); + mutex_init(&cache->free_space_lock); + btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); + + return cache; +} + +/* + * Iterate all chunks and verify that each of them has the corresponding block + * group + */ +static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) +{ + struct extent_map_tree *map_tree = &fs_info->mapping_tree; + struct extent_map *em; + struct btrfs_block_group_cache *bg; + u64 start = 0; + int ret = 0; + + while (1) { + read_lock(&map_tree->lock); + /* + * lookup_extent_mapping will return the first extent map + * intersecting the range, so setting @len to 1 is enough to + * get the first chunk. + */ + em = lookup_extent_mapping(map_tree, start, 1); + read_unlock(&map_tree->lock); + if (!em) + break; + + bg = btrfs_lookup_block_group(fs_info, em->start); + if (!bg) { + btrfs_err(fs_info, + "chunk start=%llu len=%llu doesn't have corresponding block group", + em->start, em->len); + ret = -EUCLEAN; + free_extent_map(em); + break; + } + if (bg->key.objectid != em->start || + bg->key.offset != em->len || + (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != + (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { + btrfs_err(fs_info, +"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", + em->start, em->len, + em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, + bg->key.objectid, bg->key.offset, + bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); + ret = -EUCLEAN; + free_extent_map(em); + btrfs_put_block_group(bg); + break; + } + start = em->start + em->len; + free_extent_map(em); + btrfs_put_block_group(bg); + } + return ret; +} + +int btrfs_read_block_groups(struct btrfs_fs_info *info) +{ + struct btrfs_path *path; + int ret; + struct btrfs_block_group_cache *cache; + struct btrfs_space_info *space_info; + struct btrfs_key key; + struct btrfs_key found_key; + struct extent_buffer *leaf; + int need_clear = 0; + u64 cache_gen; + u64 feature; + int mixed; + + feature = btrfs_super_incompat_flags(info->super_copy); + mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS); + + key.objectid = 0; + key.offset = 0; + key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->reada = READA_FORWARD; + + cache_gen = btrfs_super_cache_generation(info->super_copy); + if (btrfs_test_opt(info, SPACE_CACHE) && + btrfs_super_generation(info->super_copy) != cache_gen) + need_clear = 1; + if (btrfs_test_opt(info, CLEAR_CACHE)) + need_clear = 1; + + while (1) { + ret = find_first_block_group(info, path, &key); + if (ret > 0) + break; + if (ret != 0) + goto error; + + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + + cache = btrfs_create_block_group_cache(info, found_key.objectid, + found_key.offset); + if (!cache) { + ret = -ENOMEM; + goto error; + } + + if (need_clear) { + /* + * When we mount with old space cache, we need to + * set BTRFS_DC_CLEAR and set dirty flag. + * + * a) Setting 'BTRFS_DC_CLEAR' makes sure that we + * truncate the old free space cache inode and + * setup a new one. + * b) Setting 'dirty flag' makes sure that we flush + * the new space cache info onto disk. 
+ */ + if (btrfs_test_opt(info, SPACE_CACHE)) + cache->disk_cache_state = BTRFS_DC_CLEAR; + } + + read_extent_buffer(leaf, &cache->item, + btrfs_item_ptr_offset(leaf, path->slots[0]), + sizeof(cache->item)); + cache->flags = btrfs_block_group_flags(&cache->item); + if (!mixed && + ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && + (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { + btrfs_err(info, +"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", + cache->key.objectid); + ret = -EINVAL; + goto error; + } + + key.objectid = found_key.objectid + found_key.offset; + btrfs_release_path(path); + + /* + * We need to exclude the super stripes now so that the space + * info has super bytes accounted for, otherwise we'll think + * we have more space than we actually do. + */ + ret = exclude_super_stripes(cache); + if (ret) { + /* + * We may have excluded something, so call this just in + * case. + */ + btrfs_free_excluded_extents(cache); + btrfs_put_block_group(cache); + goto error; + } + + /* + * Check for two cases, either we are full, and therefore + * don't need to bother with the caching work since we won't + * find any space, or we are empty, and we can just add all + * the space in and be done with it. This saves us _a_lot_ of + * time, particularly in the full case. + */ + if (found_key.offset == btrfs_block_group_used(&cache->item)) { + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + btrfs_free_excluded_extents(cache); + } else if (btrfs_block_group_used(&cache->item) == 0) { + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + add_new_free_space(cache, found_key.objectid, + found_key.objectid + + found_key.offset); + btrfs_free_excluded_extents(cache); + } + + ret = btrfs_add_block_group_cache(info, cache); + if (ret) { + btrfs_remove_free_space_cache(cache); + btrfs_put_block_group(cache); + goto error; + } + + trace_btrfs_add_block_group(info, cache, 0); + btrfs_update_space_info(info, cache->flags, found_key.offset, + btrfs_block_group_used(&cache->item), + cache->bytes_super, &space_info); + + cache->space_info = space_info; + + link_block_group(cache); + + set_avail_alloc_bits(info, cache->flags); + if (btrfs_chunk_readonly(info, cache->key.objectid)) { + __btrfs_inc_block_group_ro(cache, 1); + } else if (btrfs_block_group_used(&cache->item) == 0) { + ASSERT(list_empty(&cache->bg_list)); + btrfs_mark_bg_unused(cache); + } + } + + list_for_each_entry_rcu(space_info, &info->space_info, list) { + if (!(btrfs_get_alloc_profile(info, space_info->flags) & + (BTRFS_BLOCK_GROUP_RAID10 | + BTRFS_BLOCK_GROUP_RAID1_MASK | + BTRFS_BLOCK_GROUP_RAID56_MASK | + BTRFS_BLOCK_GROUP_DUP))) + continue; + /* + * Avoid allocating from un-mirrored block group if there are + * mirrored block groups. 
+ */ + list_for_each_entry(cache, + &space_info->block_groups[BTRFS_RAID_RAID0], + list) + __btrfs_inc_block_group_ro(cache, 1); + list_for_each_entry(cache, + &space_info->block_groups[BTRFS_RAID_SINGLE], + list) + __btrfs_inc_block_group_ro(cache, 1); + } + + btrfs_init_global_block_rsv(info); + ret = check_chunk_block_group_mappings(info); +error: + btrfs_free_path(path); + return ret; +} + +void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_cache *block_group; + struct btrfs_root *extent_root = fs_info->extent_root; + struct btrfs_block_group_item item; + struct btrfs_key key; + int ret = 0; + + if (!trans->can_flush_pending_bgs) + return; + + while (!list_empty(&trans->new_bgs)) { + block_group = list_first_entry(&trans->new_bgs, + struct btrfs_block_group_cache, + bg_list); + if (ret) + goto next; + + spin_lock(&block_group->lock); + memcpy(&item, &block_group->item, sizeof(item)); + memcpy(&key, &block_group->key, sizeof(key)); + spin_unlock(&block_group->lock); + + ret = btrfs_insert_item(trans, extent_root, &key, &item, + sizeof(item)); + if (ret) + btrfs_abort_transaction(trans, ret); + ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset); + if (ret) + btrfs_abort_transaction(trans, ret); + add_block_group_free_space(trans, block_group); + /* Already aborted the transaction if it failed. */ +next: + btrfs_delayed_refs_rsv_release(fs_info, 1); + list_del_init(&block_group->bg_list); + } + btrfs_trans_release_chunk_metadata(trans); +} + +int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, + u64 type, u64 chunk_offset, u64 size) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_cache *cache; + int ret; + + btrfs_set_log_full_commit(trans); + + cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size); + if (!cache) + return -ENOMEM; + + btrfs_set_block_group_used(&cache->item, bytes_used); + btrfs_set_block_group_chunk_objectid(&cache->item, + BTRFS_FIRST_CHUNK_TREE_OBJECTID); + btrfs_set_block_group_flags(&cache->item, type); + + cache->flags = type; + cache->last_byte_to_unpin = (u64)-1; + cache->cached = BTRFS_CACHE_FINISHED; + cache->needs_free_space = 1; + ret = exclude_super_stripes(cache); + if (ret) { + /* We may have excluded something, so call this just in case */ + btrfs_free_excluded_extents(cache); + btrfs_put_block_group(cache); + return ret; + } + + add_new_free_space(cache, chunk_offset, chunk_offset + size); + + btrfs_free_excluded_extents(cache); + +#ifdef CONFIG_BTRFS_DEBUG + if (btrfs_should_fragment_free_space(cache)) { + u64 new_bytes_used = size - bytes_used; + + bytes_used += new_bytes_used >> 1; + btrfs_fragment_free_space(cache); + } +#endif + /* + * Ensure the corresponding space_info object is created and + * assigned to our block group. We want our bg to be added to the rbtree + * with its ->space_info set. + */ + cache->space_info = btrfs_find_space_info(fs_info, cache->flags); + ASSERT(cache->space_info); + + ret = btrfs_add_block_group_cache(fs_info, cache); + if (ret) { + btrfs_remove_free_space_cache(cache); + btrfs_put_block_group(cache); + return ret; + } + + /* + * Now that our block group has its ->space_info set and is inserted in + * the rbtree, update the space info's counters. 
+ */ + trace_btrfs_add_block_group(fs_info, cache, 1); + btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, + cache->bytes_super, &cache->space_info); + btrfs_update_global_block_rsv(fs_info); + + link_block_group(cache); + + list_add_tail(&cache->bg_list, &trans->new_bgs); + trans->delayed_ref_updates++; + btrfs_update_delayed_refs_rsv(trans); + + set_avail_alloc_bits(fs_info, type); + return 0; +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index f1fe14ba2702..4e2218f05127 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -183,6 +183,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, u64 group_start, struct extent_map *em); void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg); +int btrfs_read_block_groups(struct btrfs_fs_info *info); +int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, + u64 type, u64 chunk_offset, u64 size); +void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index aedee3f66764..92564b96ad7d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2528,16 +2528,12 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_free_block_groups(struct btrfs_fs_info *info); -int btrfs_read_block_groups(struct btrfs_fs_info *info); -int btrfs_make_block_group(struct btrfs_trans_handle *trans, - u64 bytes_used, u64 type, u64 chunk_offset, - u64 size); void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); -void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info); u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info); u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info); +u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); enum btrfs_reserve_flush_enum { diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 775d78a101b0..4e257ffa4232 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -59,46 +59,6 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } -/* - * this adds the block group to the fs_info rb tree for the block group - * cache - */ -static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, - struct btrfs_block_group_cache *block_group) -{ - struct rb_node **p; - struct rb_node *parent = NULL; - struct btrfs_block_group_cache *cache; - - spin_lock(&info->block_group_cache_lock); - p = &info->block_group_cache_tree.rb_node; - - while (*p) { - parent = *p; - cache = rb_entry(parent, struct btrfs_block_group_cache, - cache_node); - if (block_group->key.objectid < cache->key.objectid) { - p = &(*p)->rb_left; - } else if (block_group->key.objectid > cache->key.objectid) { - p = &(*p)->rb_right; - } else { - spin_unlock(&info->block_group_cache_lock); - return -EEXIST; - } - } - - rb_link_node(&block_group->cache_node, parent, p); - rb_insert_color(&block_group->cache_node, - &info->block_group_cache_tree); - - if 
(info->first_logical_byte > block_group->key.objectid) - info->first_logical_byte = block_group->key.objectid; - - spin_unlock(&info->block_group_cache_lock); - - return 0; -} - int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, u64 start, u64 num_bytes) { @@ -124,63 +84,6 @@ void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache) start, end, EXTENT_UPTODATE); } -static int exclude_super_stripes(struct btrfs_block_group_cache *cache) -{ - struct btrfs_fs_info *fs_info = cache->fs_info; - u64 bytenr; - u64 *logical; - int stripe_len; - int i, nr, ret; - - if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) { - stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid; - cache->bytes_super += stripe_len; - ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid, - stripe_len); - if (ret) - return ret; - } - - for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { - bytenr = btrfs_sb_offset(i); - ret = btrfs_rmap_block(fs_info, cache->key.objectid, - bytenr, &logical, &nr, &stripe_len); - if (ret) - return ret; - - while (nr--) { - u64 start, len; - - if (logical[nr] > cache->key.objectid + - cache->key.offset) - continue; - - if (logical[nr] + stripe_len <= cache->key.objectid) - continue; - - start = logical[nr]; - if (start < cache->key.objectid) { - start = cache->key.objectid; - len = (logical[nr] + stripe_len) - start; - } else { - len = min_t(u64, stripe_len, - cache->key.objectid + - cache->key.offset - start); - } - - cache->bytes_super += len; - ret = btrfs_add_excluded_extent(fs_info, start, len); - if (ret) { - kfree(logical); - return ret; - } - } - - kfree(logical); - } - return 0; -} - static u64 generic_ref_to_space_flags(struct btrfs_ref *ref) { if (ref->type == BTRFS_REF_METADATA) { @@ -3139,21 +3042,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) return readonly; } -static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) -{ - u64 extra_flags = chunk_to_extended(flags) & - BTRFS_EXTENDED_PROFILE_MASK; - - write_seqlock(&fs_info->profiles_lock); - if (flags & BTRFS_BLOCK_GROUP_DATA) - fs_info->avail_data_alloc_bits |= extra_flags; - if (flags & BTRFS_BLOCK_GROUP_METADATA) - fs_info->avail_metadata_alloc_bits |= extra_flags; - if (flags & BTRFS_BLOCK_GROUP_SYSTEM) - fs_info->avail_system_alloc_bits |= extra_flags; - write_sequnlock(&fs_info->profiles_lock); -} - /* * returns target flags in extended format or 0 if restripe for this * chunk_type is not in progress @@ -3254,6 +3142,11 @@ static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) return btrfs_reduce_alloc_profile(fs_info, flags); } +u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) +{ + return get_alloc_profile(fs_info, orig_flags); +} + static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -6890,86 +6783,6 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) spin_unlock(&sinfo->lock); } -static int find_first_block_group(struct btrfs_fs_info *fs_info, - struct btrfs_path *path, - struct btrfs_key *key) -{ - struct btrfs_root *root = fs_info->extent_root; - int ret = 0; - struct btrfs_key found_key; - struct extent_buffer *leaf; - struct btrfs_block_group_item bg; - u64 flags; - int slot; - - ret = btrfs_search_slot(NULL, root, key, path, 0, 0); - if (ret < 0) - goto out; - - while (1) { - slot = path->slots[0]; - leaf = path->nodes[0]; - if (slot >= btrfs_header_nritems(leaf)) { - ret = btrfs_next_leaf(root, path); - 
if (ret == 0) - continue; - if (ret < 0) - goto out; - break; - } - btrfs_item_key_to_cpu(leaf, &found_key, slot); - - if (found_key.objectid >= key->objectid && - found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { - struct extent_map_tree *em_tree; - struct extent_map *em; - - em_tree = &root->fs_info->mapping_tree; - read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, found_key.objectid, - found_key.offset); - read_unlock(&em_tree->lock); - if (!em) { - btrfs_err(fs_info, - "logical %llu len %llu found bg but no related chunk", - found_key.objectid, found_key.offset); - ret = -ENOENT; - } else if (em->start != found_key.objectid || - em->len != found_key.offset) { - btrfs_err(fs_info, - "block group %llu len %llu mismatch with chunk %llu len %llu", - found_key.objectid, found_key.offset, - em->start, em->len); - ret = -EUCLEAN; - } else { - read_extent_buffer(leaf, &bg, - btrfs_item_ptr_offset(leaf, slot), - sizeof(bg)); - flags = btrfs_block_group_flags(&bg) & - BTRFS_BLOCK_GROUP_TYPE_MASK; - - if (flags != (em->map_lookup->type & - BTRFS_BLOCK_GROUP_TYPE_MASK)) { - btrfs_err(fs_info, -"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", - found_key.objectid, - found_key.offset, flags, - (BTRFS_BLOCK_GROUP_TYPE_MASK & - em->map_lookup->type)); - ret = -EUCLEAN; - } else { - ret = 0; - } - } - free_extent_map(em); - goto out; - } - path->slots[0]++; - } -out: - return ret; -} - void btrfs_put_block_group_cache(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; @@ -7098,409 +6911,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) return 0; } -static void link_block_group(struct btrfs_block_group_cache *cache) -{ - struct btrfs_space_info *space_info = cache->space_info; - int index = btrfs_bg_flags_to_raid_index(cache->flags); - bool first = false; - - down_write(&space_info->groups_sem); - if (list_empty(&space_info->block_groups[index])) - first = true; - list_add_tail(&cache->list, &space_info->block_groups[index]); - up_write(&space_info->groups_sem); - - if (first) - btrfs_sysfs_add_block_group_type(cache); -} - -static struct btrfs_block_group_cache * -btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info, - u64 start, u64 size) -{ - struct btrfs_block_group_cache *cache; - - cache = kzalloc(sizeof(*cache), GFP_NOFS); - if (!cache) - return NULL; - - cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), - GFP_NOFS); - if (!cache->free_space_ctl) { - kfree(cache); - return NULL; - } - - cache->key.objectid = start; - cache->key.offset = size; - cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; - - cache->fs_info = fs_info; - cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); - set_free_space_tree_thresholds(cache); - - atomic_set(&cache->count, 1); - spin_lock_init(&cache->lock); - init_rwsem(&cache->data_rwsem); - INIT_LIST_HEAD(&cache->list); - INIT_LIST_HEAD(&cache->cluster_list); - INIT_LIST_HEAD(&cache->bg_list); - INIT_LIST_HEAD(&cache->ro_list); - INIT_LIST_HEAD(&cache->dirty_list); - INIT_LIST_HEAD(&cache->io_list); - btrfs_init_free_space_ctl(cache); - atomic_set(&cache->trimming, 0); - mutex_init(&cache->free_space_lock); - btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); - - return cache; -} - - -/* - * Iterate all chunks and verify that each of them has the corresponding block - * group - */ -static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) -{ - struct extent_map_tree *map_tree = &fs_info->mapping_tree; - struct extent_map *em; 
- struct btrfs_block_group_cache *bg; - u64 start = 0; - int ret = 0; - - while (1) { - read_lock(&map_tree->lock); - /* - * lookup_extent_mapping will return the first extent map - * intersecting the range, so setting @len to 1 is enough to - * get the first chunk. - */ - em = lookup_extent_mapping(map_tree, start, 1); - read_unlock(&map_tree->lock); - if (!em) - break; - - bg = btrfs_lookup_block_group(fs_info, em->start); - if (!bg) { - btrfs_err(fs_info, - "chunk start=%llu len=%llu doesn't have corresponding block group", - em->start, em->len); - ret = -EUCLEAN; - free_extent_map(em); - break; - } - if (bg->key.objectid != em->start || - bg->key.offset != em->len || - (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != - (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { - btrfs_err(fs_info, -"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", - em->start, em->len, - em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, - bg->key.objectid, bg->key.offset, - bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); - ret = -EUCLEAN; - free_extent_map(em); - btrfs_put_block_group(bg); - break; - } - start = em->start + em->len; - free_extent_map(em); - btrfs_put_block_group(bg); - } - return ret; -} - -int btrfs_read_block_groups(struct btrfs_fs_info *info) -{ - struct btrfs_path *path; - int ret; - struct btrfs_block_group_cache *cache; - struct btrfs_space_info *space_info; - struct btrfs_key key; - struct btrfs_key found_key; - struct extent_buffer *leaf; - int need_clear = 0; - u64 cache_gen; - u64 feature; - int mixed; - - feature = btrfs_super_incompat_flags(info->super_copy); - mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS); - - key.objectid = 0; - key.offset = 0; - key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - path->reada = READA_FORWARD; - - cache_gen = btrfs_super_cache_generation(info->super_copy); - if (btrfs_test_opt(info, SPACE_CACHE) && - btrfs_super_generation(info->super_copy) != cache_gen) - need_clear = 1; - if (btrfs_test_opt(info, CLEAR_CACHE)) - need_clear = 1; - - while (1) { - ret = find_first_block_group(info, path, &key); - if (ret > 0) - break; - if (ret != 0) - goto error; - - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - cache = btrfs_create_block_group_cache(info, found_key.objectid, - found_key.offset); - if (!cache) { - ret = -ENOMEM; - goto error; - } - - if (need_clear) { - /* - * When we mount with old space cache, we need to - * set BTRFS_DC_CLEAR and set dirty flag. - * - * a) Setting 'BTRFS_DC_CLEAR' makes sure that we - * truncate the old free space cache inode and - * setup a new one. - * b) Setting 'dirty flag' makes sure that we flush - * the new space cache info onto disk. 
- */ - if (btrfs_test_opt(info, SPACE_CACHE)) - cache->disk_cache_state = BTRFS_DC_CLEAR; - } - - read_extent_buffer(leaf, &cache->item, - btrfs_item_ptr_offset(leaf, path->slots[0]), - sizeof(cache->item)); - cache->flags = btrfs_block_group_flags(&cache->item); - if (!mixed && - ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && - (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { - btrfs_err(info, -"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", - cache->key.objectid); - ret = -EINVAL; - goto error; - } - - key.objectid = found_key.objectid + found_key.offset; - btrfs_release_path(path); - - /* - * We need to exclude the super stripes now so that the space - * info has super bytes accounted for, otherwise we'll think - * we have more space than we actually do. - */ - ret = exclude_super_stripes(cache); - if (ret) { - /* - * We may have excluded something, so call this just in - * case. - */ - btrfs_free_excluded_extents(cache); - btrfs_put_block_group(cache); - goto error; - } - - /* - * check for two cases, either we are full, and therefore - * don't need to bother with the caching work since we won't - * find any space, or we are empty, and we can just add all - * the space in and be done with it. This saves us _a_lot_ of - * time, particularly in the full case. - */ - if (found_key.offset == btrfs_block_group_used(&cache->item)) { - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - btrfs_free_excluded_extents(cache); - } else if (btrfs_block_group_used(&cache->item) == 0) { - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - add_new_free_space(cache, found_key.objectid, - found_key.objectid + - found_key.offset); - btrfs_free_excluded_extents(cache); - } - - ret = btrfs_add_block_group_cache(info, cache); - if (ret) { - btrfs_remove_free_space_cache(cache); - btrfs_put_block_group(cache); - goto error; - } - - trace_btrfs_add_block_group(info, cache, 0); - btrfs_update_space_info(info, cache->flags, found_key.offset, - btrfs_block_group_used(&cache->item), - cache->bytes_super, &space_info); - - cache->space_info = space_info; - - link_block_group(cache); - - set_avail_alloc_bits(info, cache->flags); - if (btrfs_chunk_readonly(info, cache->key.objectid)) { - __btrfs_inc_block_group_ro(cache, 1); - } else if (btrfs_block_group_used(&cache->item) == 0) { - ASSERT(list_empty(&cache->bg_list)); - btrfs_mark_bg_unused(cache); - } - } - - list_for_each_entry_rcu(space_info, &info->space_info, list) { - if (!(get_alloc_profile(info, space_info->flags) & - (BTRFS_BLOCK_GROUP_RAID10 | - BTRFS_BLOCK_GROUP_RAID1_MASK | - BTRFS_BLOCK_GROUP_RAID56_MASK | - BTRFS_BLOCK_GROUP_DUP))) - continue; - /* - * avoid allocating from un-mirrored block group if there are - * mirrored block groups. 
- */ - list_for_each_entry(cache, - &space_info->block_groups[BTRFS_RAID_RAID0], - list) - __btrfs_inc_block_group_ro(cache, 1); - list_for_each_entry(cache, - &space_info->block_groups[BTRFS_RAID_SINGLE], - list) - __btrfs_inc_block_group_ro(cache, 1); - } - - btrfs_init_global_block_rsv(info); - ret = check_chunk_block_group_mappings(info); -error: - btrfs_free_path(path); - return ret; -} - -void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *block_group; - struct btrfs_root *extent_root = fs_info->extent_root; - struct btrfs_block_group_item item; - struct btrfs_key key; - int ret = 0; - - if (!trans->can_flush_pending_bgs) - return; - - while (!list_empty(&trans->new_bgs)) { - block_group = list_first_entry(&trans->new_bgs, - struct btrfs_block_group_cache, - bg_list); - if (ret) - goto next; - - spin_lock(&block_group->lock); - memcpy(&item, &block_group->item, sizeof(item)); - memcpy(&key, &block_group->key, sizeof(key)); - spin_unlock(&block_group->lock); - - ret = btrfs_insert_item(trans, extent_root, &key, &item, - sizeof(item)); - if (ret) - btrfs_abort_transaction(trans, ret); - ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset); - if (ret) - btrfs_abort_transaction(trans, ret); - add_block_group_free_space(trans, block_group); - /* already aborted the transaction if it failed. */ -next: - btrfs_delayed_refs_rsv_release(fs_info, 1); - list_del_init(&block_group->bg_list); - } - btrfs_trans_release_chunk_metadata(trans); -} - -int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, - u64 type, u64 chunk_offset, u64 size) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; - int ret; - - btrfs_set_log_full_commit(trans); - - cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size); - if (!cache) - return -ENOMEM; - - btrfs_set_block_group_used(&cache->item, bytes_used); - btrfs_set_block_group_chunk_objectid(&cache->item, - BTRFS_FIRST_CHUNK_TREE_OBJECTID); - btrfs_set_block_group_flags(&cache->item, type); - - cache->flags = type; - cache->last_byte_to_unpin = (u64)-1; - cache->cached = BTRFS_CACHE_FINISHED; - cache->needs_free_space = 1; - ret = exclude_super_stripes(cache); - if (ret) { - /* - * We may have excluded something, so call this just in - * case. - */ - btrfs_free_excluded_extents(cache); - btrfs_put_block_group(cache); - return ret; - } - - add_new_free_space(cache, chunk_offset, chunk_offset + size); - - btrfs_free_excluded_extents(cache); - -#ifdef CONFIG_BTRFS_DEBUG - if (btrfs_should_fragment_free_space(cache)) { - u64 new_bytes_used = size - bytes_used; - - bytes_used += new_bytes_used >> 1; - btrfs_fragment_free_space(cache); - } -#endif - /* - * Ensure the corresponding space_info object is created and - * assigned to our block group. We want our bg to be added to the rbtree - * with its ->space_info set. - */ - cache->space_info = btrfs_find_space_info(fs_info, cache->flags); - ASSERT(cache->space_info); - - ret = btrfs_add_block_group_cache(fs_info, cache); - if (ret) { - btrfs_remove_free_space_cache(cache); - btrfs_put_block_group(cache); - return ret; - } - - /* - * Now that our block group has its ->space_info set and is inserted in - * the rbtree, update the space info's counters. 
- */ - trace_btrfs_add_block_group(fs_info, cache, 1); - btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, - cache->bytes_super, &cache->space_info); - btrfs_update_global_block_rsv(fs_info); - - link_block_group(cache); - - list_add_tail(&cache->bg_list, &trans->new_bgs); - trans->delayed_ref_updates++; - btrfs_update_delayed_refs_rsv(trans); - - set_avail_alloc_bits(fs_info, type); - return 0; -} - int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end) { From 8484764e8587dc3defa9579b795e3f7bbf9789c5 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:58 -0400 Subject: [PATCH 068/138] btrfs: temporarily export btrfs_get_restripe_target This gets used by a few different logical chunks of the block group code, export it while we move things around. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 1 + fs/btrfs/extent-tree.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 4e2218f05127..2a6a8466a746 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -197,5 +197,6 @@ static inline int btrfs_block_group_cache_done( } int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force); +u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags); #endif /* BTRFS_BLOCK_GROUP_H */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4e257ffa4232..ad6391df64ff 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3048,7 +3048,7 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) * * should be called with balance_lock held */ -static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) +u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) { struct btrfs_balance_control *bctl = fs_info->balance_ctl; u64 target = 0; @@ -3089,7 +3089,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) * try to reduce to the target profile */ spin_lock(&fs_info->balance_lock); - target = get_restripe_target(fs_info, flags); + target = btrfs_get_restripe_target(fs_info, flags); if (target) { /* pick target profile only if it's already available */ if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { @@ -6538,7 +6538,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) * if restripe for this chunk_type is on pick target profile and * return, otherwise do the usual balance */ - stripped = get_restripe_target(fs_info, flags); + stripped = btrfs_get_restripe_target(fs_info, flags); if (stripped) return extended_to_chunk(stripped); From 26ce2095e03c248759951d81fdff37e2bf32601c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:37:59 -0400 Subject: [PATCH 069/138] btrfs: migrate inc/dec_block_group_ro code This can easily be moved now. 
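For review purposes, the core of __btrfs_inc_block_group_ro() is a single space check: the bytes still free inside the group become unusable once it goes read-only, so the rest of the space_info must be able to absorb them, plus a small buffer (SZ_1M, applied only to metadata/system space and skipped when the flip is forced). A minimal standalone sketch of that check follows; it is userspace C with a made-up helper name and made-up numbers, and it omits the already-read-only path and the counter updates done under sinfo->lock and cache->lock in the real function.

	/* Hypothetical model of the read-only space check, not kernel code. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	static bool can_set_readonly(uint64_t group_size, uint64_t used,
				     uint64_t reserved, uint64_t pinned,
				     uint64_t bytes_super, uint64_t sinfo_used,
				     uint64_t sinfo_total, bool force)
	{
		/* Free bytes in this group that become unusable when it goes RO. */
		uint64_t num_bytes = group_size - reserved - pinned -
				     bytes_super - used;
		/*
		 * 1MiB buffer; simplification: the kernel applies it only to
		 * metadata/system space and always skips it when forced.
		 */
		uint64_t min_allocable = force ? 0 : (1ULL << 20);

		return sinfo_used + num_bytes + min_allocable <= sinfo_total;
	}

	int main(void)
	{
		/* Nearly full group, plenty of head room elsewhere: allowed (prints 1). */
		printf("%d\n", can_set_readonly(1ULL << 30, 1000ULL << 20, 0, 0, 0,
						5ULL << 30, 8ULL << 30, false));
		/* Empty group in a nearly full space_info: refused (prints 0). */
		printf("%d\n", can_set_readonly(1ULL << 30, 0, 0, 0, 0,
						7ULL << 30, 8ULL << 30, false));
		return 0;
	}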
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ refresh ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 212 +++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 2 + fs/btrfs/ctree.h | 2 - fs/btrfs/extent-tree.c | 212 ----------------------------------------- 4 files changed, 214 insertions(+), 214 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 6a70301a587a..a4b76e57680a 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1060,6 +1060,80 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( num_items, 1); } +/* + * Mark block group @cache read-only, so later write won't happen to block + * group @cache. + * + * If @force is not set, this function will only mark the block group readonly + * if we have enough free space (1M) in other metadata/system block groups. + * If @force is not set, this function will mark the block group readonly + * without checking free space. + * + * NOTE: This function doesn't care if other block groups can contain all the + * data in this block group. That check should be done by relocation routine, + * not this function. + */ +int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) +{ + struct btrfs_space_info *sinfo = cache->space_info; + u64 num_bytes; + u64 sinfo_used; + u64 min_allocable_bytes; + int ret = -ENOSPC; + + /* + * We need some metadata space and system metadata space for + * allocating chunks in some corner cases until we force to set + * it to be readonly. + */ + if ((sinfo->flags & + (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && + !force) + min_allocable_bytes = SZ_1M; + else + min_allocable_bytes = 0; + + spin_lock(&sinfo->lock); + spin_lock(&cache->lock); + + if (cache->ro) { + cache->ro++; + ret = 0; + goto out; + } + + num_bytes = cache->key.offset - cache->reserved - cache->pinned - + cache->bytes_super - btrfs_block_group_used(&cache->item); + sinfo_used = btrfs_space_info_used(sinfo, true); + + /* + * sinfo_used + num_bytes should always <= sinfo->total_bytes. + * + * Here we make sure if we mark this bg RO, we still have enough + * free space as buffer (if min_allocable_bytes is not 0). + */ + if (sinfo_used + num_bytes + min_allocable_bytes <= + sinfo->total_bytes) { + sinfo->bytes_readonly += num_bytes; + cache->ro++; + list_add_tail(&cache->ro_list, &sinfo->ro_bgs); + ret = 0; + } +out: + spin_unlock(&cache->lock); + spin_unlock(&sinfo->lock); + if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { + btrfs_info(cache->fs_info, + "unable to make block group %llu ro", + cache->key.objectid); + btrfs_info(cache->fs_info, + "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", + sinfo_used, num_bytes, min_allocable_bytes); + btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); + } + return ret; +} + /* * Process the unused_bgs list and remove any that don't have any allocated * space inside of them. 
@@ -1791,3 +1865,141 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, set_avail_alloc_bits(fs_info, type); return 0; } + +static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) +{ + u64 num_devices; + u64 stripped; + + /* + * if restripe for this chunk_type is on pick target profile and + * return, otherwise do the usual balance + */ + stripped = btrfs_get_restripe_target(fs_info, flags); + if (stripped) + return extended_to_chunk(stripped); + + num_devices = fs_info->fs_devices->rw_devices; + + stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK | + BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10; + + if (num_devices == 1) { + stripped |= BTRFS_BLOCK_GROUP_DUP; + stripped = flags & ~stripped; + + /* turn raid0 into single device chunks */ + if (flags & BTRFS_BLOCK_GROUP_RAID0) + return stripped; + + /* turn mirroring into duplication */ + if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK | + BTRFS_BLOCK_GROUP_RAID10)) + return stripped | BTRFS_BLOCK_GROUP_DUP; + } else { + /* they already had raid on here, just return */ + if (flags & stripped) + return flags; + + stripped |= BTRFS_BLOCK_GROUP_DUP; + stripped = flags & ~stripped; + + /* switch duplicated blocks with raid1 */ + if (flags & BTRFS_BLOCK_GROUP_DUP) + return stripped | BTRFS_BLOCK_GROUP_RAID1; + + /* this is drive concat, leave it alone */ + } + + return flags; +} + +int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) + +{ + struct btrfs_fs_info *fs_info = cache->fs_info; + struct btrfs_trans_handle *trans; + u64 alloc_flags; + int ret; + +again: + trans = btrfs_join_transaction(fs_info->extent_root); + if (IS_ERR(trans)) + return PTR_ERR(trans); + + /* + * we're not allowed to set block groups readonly after the dirty + * block groups cache has started writing. If it already started, + * back off and let this transaction commit + */ + mutex_lock(&fs_info->ro_block_group_mutex); + if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { + u64 transid = trans->transid; + + mutex_unlock(&fs_info->ro_block_group_mutex); + btrfs_end_transaction(trans); + + ret = btrfs_wait_for_commit(fs_info, transid); + if (ret) + return ret; + goto again; + } + + /* + * if we are changing raid levels, try to allocate a corresponding + * block group with the new raid level. 
+ */ + alloc_flags = update_block_group_flags(fs_info, cache->flags); + if (alloc_flags != cache->flags) { + ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + /* + * ENOSPC is allowed here, we may have enough space + * already allocated at the new raid level to + * carry on + */ + if (ret == -ENOSPC) + ret = 0; + if (ret < 0) + goto out; + } + + ret = __btrfs_inc_block_group_ro(cache, 0); + if (!ret) + goto out; + alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); + ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); + if (ret < 0) + goto out; + ret = __btrfs_inc_block_group_ro(cache, 0); +out: + if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { + alloc_flags = update_block_group_flags(fs_info, cache->flags); + mutex_lock(&fs_info->chunk_mutex); + check_system_chunk(trans, alloc_flags); + mutex_unlock(&fs_info->chunk_mutex); + } + mutex_unlock(&fs_info->ro_block_group_mutex); + + btrfs_end_transaction(trans); + return ret; +} + +void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) +{ + struct btrfs_space_info *sinfo = cache->space_info; + u64 num_bytes; + + BUG_ON(!cache->ro); + + spin_lock(&sinfo->lock); + spin_lock(&cache->lock); + if (!--cache->ro) { + num_bytes = cache->key.offset - cache->reserved - + cache->pinned - cache->bytes_super - + btrfs_block_group_used(&cache->item); + sinfo->bytes_readonly -= num_bytes; + list_del_init(&cache->ro_list); + } + spin_unlock(&cache->lock); + spin_unlock(&sinfo->lock); +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 2a6a8466a746..a048a9408dec 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -187,6 +187,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); +int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); +void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 92564b96ad7d..f97ad638983b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2590,8 +2590,6 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, bool qgroup_free); int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); -void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ad6391df64ff..519cf2cb5cef 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6529,198 +6529,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, return ret; } -static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) -{ - u64 num_devices; - u64 stripped; - - /* - * if restripe for this chunk_type is on pick target profile and - * return, otherwise do the usual balance - */ - stripped = btrfs_get_restripe_target(fs_info, flags); - if (stripped) - return extended_to_chunk(stripped); - - num_devices = fs_info->fs_devices->rw_devices; - - stripped = BTRFS_BLOCK_GROUP_RAID0 | 
BTRFS_BLOCK_GROUP_RAID56_MASK | - BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10; - - if (num_devices == 1) { - stripped |= BTRFS_BLOCK_GROUP_DUP; - stripped = flags & ~stripped; - - /* turn raid0 into single device chunks */ - if (flags & BTRFS_BLOCK_GROUP_RAID0) - return stripped; - - /* turn mirroring into duplication */ - if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK | - BTRFS_BLOCK_GROUP_RAID10)) - return stripped | BTRFS_BLOCK_GROUP_DUP; - } else { - /* they already had raid on here, just return */ - if (flags & stripped) - return flags; - - stripped |= BTRFS_BLOCK_GROUP_DUP; - stripped = flags & ~stripped; - - /* switch duplicated blocks with raid1 */ - if (flags & BTRFS_BLOCK_GROUP_DUP) - return stripped | BTRFS_BLOCK_GROUP_RAID1; - - /* this is drive concat, leave it alone */ - } - - return flags; -} - -/* - * Mark block group @cache read-only, so later write won't happen to block - * group @cache. - * - * If @force is not set, this function will only mark the block group readonly - * if we have enough free space (1M) in other metadata/system block groups. - * If @force is not set, this function will mark the block group readonly - * without checking free space. - * - * NOTE: This function doesn't care if other block groups can contain all the - * data in this block group. That check should be done by relocation routine, - * not this function. - */ -int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) -{ - struct btrfs_space_info *sinfo = cache->space_info; - u64 num_bytes; - u64 sinfo_used; - u64 min_allocable_bytes; - int ret = -ENOSPC; - - /* - * We need some metadata space and system metadata space for - * allocating chunks in some corner cases until we force to set - * it to be readonly. - */ - if ((sinfo->flags & - (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && - !force) - min_allocable_bytes = SZ_1M; - else - min_allocable_bytes = 0; - - spin_lock(&sinfo->lock); - spin_lock(&cache->lock); - - if (cache->ro) { - cache->ro++; - ret = 0; - goto out; - } - - num_bytes = cache->key.offset - cache->reserved - cache->pinned - - cache->bytes_super - btrfs_block_group_used(&cache->item); - sinfo_used = btrfs_space_info_used(sinfo, true); - - /* - * sinfo_used + num_bytes should always <= sinfo->total_bytes. - * - * Here we make sure if we mark this bg RO, we still have enough - * free space as buffer (if min_allocable_bytes is not 0). - */ - if (sinfo_used + num_bytes + min_allocable_bytes <= - sinfo->total_bytes) { - sinfo->bytes_readonly += num_bytes; - cache->ro++; - list_add_tail(&cache->ro_list, &sinfo->ro_bgs); - ret = 0; - } -out: - spin_unlock(&cache->lock); - spin_unlock(&sinfo->lock); - if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { - btrfs_info(cache->fs_info, - "unable to make block group %llu ro", - cache->key.objectid); - btrfs_info(cache->fs_info, - "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu", - sinfo_used, num_bytes, min_allocable_bytes); - btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); - } - return ret; -} - -int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) - -{ - struct btrfs_fs_info *fs_info = cache->fs_info; - struct btrfs_trans_handle *trans; - u64 alloc_flags; - int ret; - -again: - trans = btrfs_join_transaction(fs_info->extent_root); - if (IS_ERR(trans)) - return PTR_ERR(trans); - - /* - * we're not allowed to set block groups readonly after the dirty - * block groups cache has started writing. 
If it already started, - * back off and let this transaction commit - */ - mutex_lock(&fs_info->ro_block_group_mutex); - if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { - u64 transid = trans->transid; - - mutex_unlock(&fs_info->ro_block_group_mutex); - btrfs_end_transaction(trans); - - ret = btrfs_wait_for_commit(fs_info, transid); - if (ret) - return ret; - goto again; - } - - /* - * if we are changing raid levels, try to allocate a corresponding - * block group with the new raid level. - */ - alloc_flags = update_block_group_flags(fs_info, cache->flags); - if (alloc_flags != cache->flags) { - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); - /* - * ENOSPC is allowed here, we may have enough space - * already allocated at the new raid level to - * carry on - */ - if (ret == -ENOSPC) - ret = 0; - if (ret < 0) - goto out; - } - - ret = __btrfs_inc_block_group_ro(cache, 0); - if (!ret) - goto out; - alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags); - ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); - if (ret < 0) - goto out; - ret = __btrfs_inc_block_group_ro(cache, 0); -out: - if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { - alloc_flags = update_block_group_flags(fs_info, cache->flags); - mutex_lock(&fs_info->chunk_mutex); - check_system_chunk(trans, alloc_flags); - mutex_unlock(&fs_info->chunk_mutex); - } - mutex_unlock(&fs_info->ro_block_group_mutex); - - btrfs_end_transaction(trans); - return ret; -} - int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) { u64 alloc_flags = get_alloc_profile(trans->fs_info, type); @@ -6763,26 +6571,6 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) return free_bytes; } -void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) -{ - struct btrfs_space_info *sinfo = cache->space_info; - u64 num_bytes; - - BUG_ON(!cache->ro); - - spin_lock(&sinfo->lock); - spin_lock(&cache->lock); - if (!--cache->ro) { - num_bytes = cache->key.offset - cache->reserved - - cache->pinned - cache->bytes_super - - btrfs_block_group_used(&cache->item); - sinfo->bytes_readonly -= num_bytes; - list_del_init(&cache->ro_list); - } - spin_unlock(&cache->lock); - spin_unlock(&sinfo->lock); -} - void btrfs_put_block_group_cache(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; From 77745c05115fcf3c2b7deb599799a6b51d1c5155 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:00 -0400 Subject: [PATCH 070/138] btrfs: migrate the dirty bg writeout code This can be easily migrated over now. 
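Both writeout entry points migrated below follow the same shape: btrfs_start_dirty_block_groups() splices the transaction's dirty_bgs list onto a private list under dirty_bgs_lock, writes the free space caches without blocking the commit, and loops back once for groups re-dirtied in the meantime, while btrfs_write_dirty_block_groups() repeats the walk inside the commit critical section. A minimal user-space sketch of that splice-and-retry pattern, using POSIX threads and an invented dirty_item type instead of the btrfs structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for a dirty block group entry. */
struct dirty_item {
	int id;
	struct dirty_item *next;
};

static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dirty_item *dirty_list;	/* like cur_trans->dirty_bgs */

static void mark_dirty(int id)
{
	struct dirty_item *it = malloc(sizeof(*it));

	it->id = id;
	pthread_mutex_lock(&dirty_lock);
	it->next = dirty_list;
	dirty_list = it;
	pthread_mutex_unlock(&dirty_lock);
}

/* Pretend to write one group's cache; real code may re-dirty others here. */
static void write_cache(struct dirty_item *it)
{
	printf("writing cache for group %d\n", it->id);
}

/*
 * Shape of btrfs_start_dirty_block_groups(): take everything queued so far,
 * process it without holding the lock, then loop back exactly once in case
 * more groups were dirtied while we were writing.
 */
static void start_dirty_writeout(void)
{
	int loops = 0;

again:
	pthread_mutex_lock(&dirty_lock);
	struct dirty_item *batch = dirty_list;	/* splice the list away */
	dirty_list = NULL;
	pthread_mutex_unlock(&dirty_lock);

	while (batch) {
		struct dirty_item *it = batch;

		batch = it->next;
		write_cache(it);
		free(it);
	}

	if (loops++ == 0) {
		pthread_mutex_lock(&dirty_lock);
		int more = dirty_list != NULL;
		pthread_mutex_unlock(&dirty_lock);
		if (more)
			goto again;
	}
}

int main(void)
{
	mark_dirty(1);
	mark_dirty(2);
	start_dirty_writeout();
	return 0;
}
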
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ update comments ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 516 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 3 + fs/btrfs/ctree.h | 3 - fs/btrfs/extent-tree.c | 518 ----------------------------------------- 4 files changed, 519 insertions(+), 521 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index a4b76e57680a..763bab380379 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -12,6 +12,7 @@ #include "ref-verify.h" #include "sysfs.h" #include "tree-log.h" +#include "delalloc-space.h" void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { @@ -2003,3 +2004,518 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache) spin_unlock(&cache->lock); spin_unlock(&sinfo->lock); } + +static int write_one_cache_group(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_block_group_cache *cache) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + int ret; + struct btrfs_root *extent_root = fs_info->extent_root; + unsigned long bi; + struct extent_buffer *leaf; + + ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); + if (ret) { + if (ret > 0) + ret = -ENOENT; + goto fail; + } + + leaf = path->nodes[0]; + bi = btrfs_item_ptr_offset(leaf, path->slots[0]); + write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); + btrfs_mark_buffer_dirty(leaf); +fail: + btrfs_release_path(path); + return ret; + +} + +static int cache_save_setup(struct btrfs_block_group_cache *block_group, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + struct btrfs_fs_info *fs_info = block_group->fs_info; + struct btrfs_root *root = fs_info->tree_root; + struct inode *inode = NULL; + struct extent_changeset *data_reserved = NULL; + u64 alloc_hint = 0; + int dcs = BTRFS_DC_ERROR; + u64 num_pages = 0; + int retries = 0; + int ret = 0; + + /* + * If this block group is smaller than 100 megs don't bother caching the + * block group. + */ + if (block_group->key.offset < (100 * SZ_1M)) { + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_WRITTEN; + spin_unlock(&block_group->lock); + return 0; + } + + if (trans->aborted) + return 0; +again: + inode = lookup_free_space_inode(block_group, path); + if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { + ret = PTR_ERR(inode); + btrfs_release_path(path); + goto out; + } + + if (IS_ERR(inode)) { + BUG_ON(retries); + retries++; + + if (block_group->ro) + goto out_free; + + ret = create_free_space_inode(trans, block_group, path); + if (ret) + goto out_free; + goto again; + } + + /* + * We want to set the generation to 0, that way if anything goes wrong + * from here on out we know not to trust this cache when we load up next + * time. + */ + BTRFS_I(inode)->generation = 0; + ret = btrfs_update_inode(trans, root, inode); + if (ret) { + /* + * So theoretically we could recover from this, simply set the + * super cache generation to 0 so we know to invalidate the + * cache, but then we'd have to keep track of the block groups + * that fail this way so we know we _have_ to reset this cache + * before the next commit or risk reading stale cache. So to + * limit our exposure to horrible edge cases lets just abort the + * transaction, this only happens in really bad situations + * anyway. 
+ */ + btrfs_abort_transaction(trans, ret); + goto out_put; + } + WARN_ON(ret); + + /* We've already setup this transaction, go ahead and exit */ + if (block_group->cache_generation == trans->transid && + i_size_read(inode)) { + dcs = BTRFS_DC_SETUP; + goto out_put; + } + + if (i_size_read(inode) > 0) { + ret = btrfs_check_trunc_cache_free_space(fs_info, + &fs_info->global_block_rsv); + if (ret) + goto out_put; + + ret = btrfs_truncate_free_space_cache(trans, NULL, inode); + if (ret) + goto out_put; + } + + spin_lock(&block_group->lock); + if (block_group->cached != BTRFS_CACHE_FINISHED || + !btrfs_test_opt(fs_info, SPACE_CACHE)) { + /* + * don't bother trying to write stuff out _if_ + * a) we're not cached, + * b) we're with nospace_cache mount option, + * c) we're with v2 space_cache (FREE_SPACE_TREE). + */ + dcs = BTRFS_DC_WRITTEN; + spin_unlock(&block_group->lock); + goto out_put; + } + spin_unlock(&block_group->lock); + + /* + * We hit an ENOSPC when setting up the cache in this transaction, just + * skip doing the setup, we've already cleared the cache so we're safe. + */ + if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { + ret = -ENOSPC; + goto out_put; + } + + /* + * Try to preallocate enough space based on how big the block group is. + * Keep in mind this has to include any pinned space which could end up + * taking up quite a bit since it's not folded into the other space + * cache. + */ + num_pages = div_u64(block_group->key.offset, SZ_256M); + if (!num_pages) + num_pages = 1; + + num_pages *= 16; + num_pages *= PAGE_SIZE; + + ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages); + if (ret) + goto out_put; + + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, + num_pages, num_pages, + &alloc_hint); + /* + * Our cache requires contiguous chunks so that we don't modify a bunch + * of metadata or split extents when writing the cache out, which means + * we can enospc if we are heavily fragmented in addition to just normal + * out of space conditions. So if we hit this just skip setting up any + * other block groups for this transaction, maybe we'll unpin enough + * space the next time around. + */ + if (!ret) + dcs = BTRFS_DC_SETUP; + else if (ret == -ENOSPC) + set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); + +out_put: + iput(inode); +out_free: + btrfs_release_path(path); +out: + spin_lock(&block_group->lock); + if (!ret && dcs == BTRFS_DC_SETUP) + block_group->cache_generation = trans->transid; + block_group->disk_cache_state = dcs; + spin_unlock(&block_group->lock); + + extent_changeset_free(data_reserved); + return ret; +} + +int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_cache *cache, *tmp; + struct btrfs_transaction *cur_trans = trans->transaction; + struct btrfs_path *path; + + if (list_empty(&cur_trans->dirty_bgs) || + !btrfs_test_opt(fs_info, SPACE_CACHE)) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* Could add new block groups, use _safe just in case */ + list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, + dirty_list) { + if (cache->disk_cache_state == BTRFS_DC_CLEAR) + cache_save_setup(cache, trans, path); + } + + btrfs_free_path(path); + return 0; +} + +/* + * Transaction commit does final block group cache writeback during a critical + * section where nothing is allowed to change the FS. 
This is required in + * order for the cache to actually match the block group, but can introduce a + * lot of latency into the commit. + * + * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. + * There's a chance we'll have to redo some of it if the block group changes + * again during the commit, but it greatly reduces the commit latency by + * getting rid of the easy block groups while we're still allowing others to + * join the commit. + */ +int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_cache *cache; + struct btrfs_transaction *cur_trans = trans->transaction; + int ret = 0; + int should_put; + struct btrfs_path *path = NULL; + LIST_HEAD(dirty); + struct list_head *io = &cur_trans->io_bgs; + int num_started = 0; + int loops = 0; + + spin_lock(&cur_trans->dirty_bgs_lock); + if (list_empty(&cur_trans->dirty_bgs)) { + spin_unlock(&cur_trans->dirty_bgs_lock); + return 0; + } + list_splice_init(&cur_trans->dirty_bgs, &dirty); + spin_unlock(&cur_trans->dirty_bgs_lock); + +again: + /* Make sure all the block groups on our dirty list actually exist */ + btrfs_create_pending_block_groups(trans); + + if (!path) { + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + } + + /* + * cache_write_mutex is here only to save us from balance or automatic + * removal of empty block groups deleting this block group while we are + * writing out the cache + */ + mutex_lock(&trans->transaction->cache_write_mutex); + while (!list_empty(&dirty)) { + bool drop_reserve = true; + + cache = list_first_entry(&dirty, + struct btrfs_block_group_cache, + dirty_list); + /* + * This can happen if something re-dirties a block group that + * is already under IO. Just wait for it to finish and then do + * it all again + */ + if (!list_empty(&cache->io_list)) { + list_del_init(&cache->io_list); + btrfs_wait_cache_io(trans, cache, path); + btrfs_put_block_group(cache); + } + + + /* + * btrfs_wait_cache_io uses the cache->dirty_list to decide if + * it should update the cache_state. Don't delete until after + * we wait. + * + * Since we're not running in the commit critical section + * we need the dirty_bgs_lock to protect from update_block_group + */ + spin_lock(&cur_trans->dirty_bgs_lock); + list_del_init(&cache->dirty_list); + spin_unlock(&cur_trans->dirty_bgs_lock); + + should_put = 1; + + cache_save_setup(cache, trans, path); + + if (cache->disk_cache_state == BTRFS_DC_SETUP) { + cache->io_ctl.inode = NULL; + ret = btrfs_write_out_cache(trans, cache, path); + if (ret == 0 && cache->io_ctl.inode) { + num_started++; + should_put = 0; + + /* + * The cache_write_mutex is protecting the + * io_list, also refer to the definition of + * btrfs_transaction::io_bgs for more details + */ + list_add_tail(&cache->io_list, io); + } else { + /* + * If we failed to write the cache, the + * generation will be bad and life goes on + */ + ret = 0; + } + } + if (!ret) { + ret = write_one_cache_group(trans, path, cache); + /* + * Our block group might still be attached to the list + * of new block groups in the transaction handle of some + * other task (struct btrfs_trans_handle->new_bgs). This + * means its block group item isn't yet in the extent + * tree. If this happens ignore the error, as we will + * try again later in the critical section of the + * transaction commit. 
+ */ + if (ret == -ENOENT) { + ret = 0; + spin_lock(&cur_trans->dirty_bgs_lock); + if (list_empty(&cache->dirty_list)) { + list_add_tail(&cache->dirty_list, + &cur_trans->dirty_bgs); + btrfs_get_block_group(cache); + drop_reserve = false; + } + spin_unlock(&cur_trans->dirty_bgs_lock); + } else if (ret) { + btrfs_abort_transaction(trans, ret); + } + } + + /* If it's not on the io list, we need to put the block group */ + if (should_put) + btrfs_put_block_group(cache); + if (drop_reserve) + btrfs_delayed_refs_rsv_release(fs_info, 1); + + if (ret) + break; + + /* + * Avoid blocking other tasks for too long. It might even save + * us from writing caches for block groups that are going to be + * removed. + */ + mutex_unlock(&trans->transaction->cache_write_mutex); + mutex_lock(&trans->transaction->cache_write_mutex); + } + mutex_unlock(&trans->transaction->cache_write_mutex); + + /* + * Go through delayed refs for all the stuff we've just kicked off + * and then loop back (just once) + */ + ret = btrfs_run_delayed_refs(trans, 0); + if (!ret && loops == 0) { + loops++; + spin_lock(&cur_trans->dirty_bgs_lock); + list_splice_init(&cur_trans->dirty_bgs, &dirty); + /* + * dirty_bgs_lock protects us from concurrent block group + * deletes too (not just cache_write_mutex). + */ + if (!list_empty(&dirty)) { + spin_unlock(&cur_trans->dirty_bgs_lock); + goto again; + } + spin_unlock(&cur_trans->dirty_bgs_lock); + } else if (ret < 0) { + btrfs_cleanup_dirty_bgs(cur_trans, fs_info); + } + + btrfs_free_path(path); + return ret; +} + +int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_block_group_cache *cache; + struct btrfs_transaction *cur_trans = trans->transaction; + int ret = 0; + int should_put; + struct btrfs_path *path; + struct list_head *io = &cur_trans->io_bgs; + int num_started = 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* + * Even though we are in the critical section of the transaction commit, + * we can still have concurrent tasks adding elements to this + * transaction's list of dirty block groups. These tasks correspond to + * endio free space workers started when writeback finishes for a + * space cache, which run inode.c:btrfs_finish_ordered_io(), and can + * allocate new block groups as a result of COWing nodes of the root + * tree when updating the free space inode. The writeback for the space + * caches is triggered by an earlier call to + * btrfs_start_dirty_block_groups() and iterations of the following + * loop. + * Also we want to do the cache_save_setup first and then run the + * delayed refs to make sure we have the best chance at doing this all + * in one shot. + */ + spin_lock(&cur_trans->dirty_bgs_lock); + while (!list_empty(&cur_trans->dirty_bgs)) { + cache = list_first_entry(&cur_trans->dirty_bgs, + struct btrfs_block_group_cache, + dirty_list); + + /* + * This can happen if cache_save_setup re-dirties a block group + * that is already under IO. 
Just wait for it to finish and + * then do it all again + */ + if (!list_empty(&cache->io_list)) { + spin_unlock(&cur_trans->dirty_bgs_lock); + list_del_init(&cache->io_list); + btrfs_wait_cache_io(trans, cache, path); + btrfs_put_block_group(cache); + spin_lock(&cur_trans->dirty_bgs_lock); + } + + /* + * Don't remove from the dirty list until after we've waited on + * any pending IO + */ + list_del_init(&cache->dirty_list); + spin_unlock(&cur_trans->dirty_bgs_lock); + should_put = 1; + + cache_save_setup(cache, trans, path); + + if (!ret) + ret = btrfs_run_delayed_refs(trans, + (unsigned long) -1); + + if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { + cache->io_ctl.inode = NULL; + ret = btrfs_write_out_cache(trans, cache, path); + if (ret == 0 && cache->io_ctl.inode) { + num_started++; + should_put = 0; + list_add_tail(&cache->io_list, io); + } else { + /* + * If we failed to write the cache, the + * generation will be bad and life goes on + */ + ret = 0; + } + } + if (!ret) { + ret = write_one_cache_group(trans, path, cache); + /* + * One of the free space endio workers might have + * created a new block group while updating a free space + * cache's inode (at inode.c:btrfs_finish_ordered_io()) + * and hasn't released its transaction handle yet, in + * which case the new block group is still attached to + * its transaction handle and its creation has not + * finished yet (no block group item in the extent tree + * yet, etc). If this is the case, wait for all free + * space endio workers to finish and retry. This is a + * a very rare case so no need for a more efficient and + * complex approach. + */ + if (ret == -ENOENT) { + wait_event(cur_trans->writer_wait, + atomic_read(&cur_trans->num_writers) == 1); + ret = write_one_cache_group(trans, path, cache); + } + if (ret) + btrfs_abort_transaction(trans, ret); + } + + /* If its not on the io list, we need to put the block group */ + if (should_put) + btrfs_put_block_group(cache); + btrfs_delayed_refs_rsv_release(fs_info, 1); + spin_lock(&cur_trans->dirty_bgs_lock); + } + spin_unlock(&cur_trans->dirty_bgs_lock); + + /* + * Refer to the definition of io_bgs member for details why it's safe + * to use it without any locking + */ + while (!list_empty(io)) { + cache = list_first_entry(io, struct btrfs_block_group_cache, + io_list); + list_del_init(&cache->io_list); + btrfs_wait_cache_io(trans, cache, path); + btrfs_put_block_group(cache); + } + + btrfs_free_path(path); + return ret; +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index a048a9408dec..749d34071f86 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -189,6 +189,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans); int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); +int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); +int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); +int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f97ad638983b..6b17573c2fe6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2523,9 +2523,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans); int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref 
*generic_ref); -int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); -int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); -int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_free_block_groups(struct btrfs_fs_info *info); void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 519cf2cb5cef..0b78e5dcfe48 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2511,524 +2511,6 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, return __btrfs_mod_ref(trans, root, buf, full_backref, 0); } -static int write_one_cache_group(struct btrfs_trans_handle *trans, - struct btrfs_path *path, - struct btrfs_block_group_cache *cache) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - int ret; - struct btrfs_root *extent_root = fs_info->extent_root; - unsigned long bi; - struct extent_buffer *leaf; - - ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); - if (ret) { - if (ret > 0) - ret = -ENOENT; - goto fail; - } - - leaf = path->nodes[0]; - bi = btrfs_item_ptr_offset(leaf, path->slots[0]); - write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); - btrfs_mark_buffer_dirty(leaf); -fail: - btrfs_release_path(path); - return ret; - -} - -static int cache_save_setup(struct btrfs_block_group_cache *block_group, - struct btrfs_trans_handle *trans, - struct btrfs_path *path) -{ - struct btrfs_fs_info *fs_info = block_group->fs_info; - struct btrfs_root *root = fs_info->tree_root; - struct inode *inode = NULL; - struct extent_changeset *data_reserved = NULL; - u64 alloc_hint = 0; - int dcs = BTRFS_DC_ERROR; - u64 num_pages = 0; - int retries = 0; - int ret = 0; - - /* - * If this block group is smaller than 100 megs don't bother caching the - * block group. - */ - if (block_group->key.offset < (100 * SZ_1M)) { - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_WRITTEN; - spin_unlock(&block_group->lock); - return 0; - } - - if (trans->aborted) - return 0; -again: - inode = lookup_free_space_inode(block_group, path); - if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { - ret = PTR_ERR(inode); - btrfs_release_path(path); - goto out; - } - - if (IS_ERR(inode)) { - BUG_ON(retries); - retries++; - - if (block_group->ro) - goto out_free; - - ret = create_free_space_inode(trans, block_group, path); - if (ret) - goto out_free; - goto again; - } - - /* - * We want to set the generation to 0, that way if anything goes wrong - * from here on out we know not to trust this cache when we load up next - * time. - */ - BTRFS_I(inode)->generation = 0; - ret = btrfs_update_inode(trans, root, inode); - if (ret) { - /* - * So theoretically we could recover from this, simply set the - * super cache generation to 0 so we know to invalidate the - * cache, but then we'd have to keep track of the block groups - * that fail this way so we know we _have_ to reset this cache - * before the next commit or risk reading stale cache. So to - * limit our exposure to horrible edge cases lets just abort the - * transaction, this only happens in really bad situations - * anyway. 
- */ - btrfs_abort_transaction(trans, ret); - goto out_put; - } - WARN_ON(ret); - - /* We've already setup this transaction, go ahead and exit */ - if (block_group->cache_generation == trans->transid && - i_size_read(inode)) { - dcs = BTRFS_DC_SETUP; - goto out_put; - } - - if (i_size_read(inode) > 0) { - ret = btrfs_check_trunc_cache_free_space(fs_info, - &fs_info->global_block_rsv); - if (ret) - goto out_put; - - ret = btrfs_truncate_free_space_cache(trans, NULL, inode); - if (ret) - goto out_put; - } - - spin_lock(&block_group->lock); - if (block_group->cached != BTRFS_CACHE_FINISHED || - !btrfs_test_opt(fs_info, SPACE_CACHE)) { - /* - * don't bother trying to write stuff out _if_ - * a) we're not cached, - * b) we're with nospace_cache mount option, - * c) we're with v2 space_cache (FREE_SPACE_TREE). - */ - dcs = BTRFS_DC_WRITTEN; - spin_unlock(&block_group->lock); - goto out_put; - } - spin_unlock(&block_group->lock); - - /* - * We hit an ENOSPC when setting up the cache in this transaction, just - * skip doing the setup, we've already cleared the cache so we're safe. - */ - if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { - ret = -ENOSPC; - goto out_put; - } - - /* - * Try to preallocate enough space based on how big the block group is. - * Keep in mind this has to include any pinned space which could end up - * taking up quite a bit since it's not folded into the other space - * cache. - */ - num_pages = div_u64(block_group->key.offset, SZ_256M); - if (!num_pages) - num_pages = 1; - - num_pages *= 16; - num_pages *= PAGE_SIZE; - - ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages); - if (ret) - goto out_put; - - ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, - num_pages, num_pages, - &alloc_hint); - /* - * Our cache requires contiguous chunks so that we don't modify a bunch - * of metadata or split extents when writing the cache out, which means - * we can enospc if we are heavily fragmented in addition to just normal - * out of space conditions. So if we hit this just skip setting up any - * other block groups for this transaction, maybe we'll unpin enough - * space the next time around. - */ - if (!ret) - dcs = BTRFS_DC_SETUP; - else if (ret == -ENOSPC) - set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); - -out_put: - iput(inode); -out_free: - btrfs_release_path(path); -out: - spin_lock(&block_group->lock); - if (!ret && dcs == BTRFS_DC_SETUP) - block_group->cache_generation = trans->transid; - block_group->disk_cache_state = dcs; - spin_unlock(&block_group->lock); - - extent_changeset_free(data_reserved); - return ret; -} - -int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache, *tmp; - struct btrfs_transaction *cur_trans = trans->transaction; - struct btrfs_path *path; - - if (list_empty(&cur_trans->dirty_bgs) || - !btrfs_test_opt(fs_info, SPACE_CACHE)) - return 0; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - /* Could add new block groups, use _safe just in case */ - list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, - dirty_list) { - if (cache->disk_cache_state == BTRFS_DC_CLEAR) - cache_save_setup(cache, trans, path); - } - - btrfs_free_path(path); - return 0; -} - -/* - * transaction commit does final block group cache writeback during a - * critical section where nothing is allowed to change the FS. 
This is - * required in order for the cache to actually match the block group, - * but can introduce a lot of latency into the commit. - * - * So, btrfs_start_dirty_block_groups is here to kick off block group - * cache IO. There's a chance we'll have to redo some of it if the - * block group changes again during the commit, but it greatly reduces - * the commit latency by getting rid of the easy block groups while - * we're still allowing others to join the commit. - */ -int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; - struct btrfs_transaction *cur_trans = trans->transaction; - int ret = 0; - int should_put; - struct btrfs_path *path = NULL; - LIST_HEAD(dirty); - struct list_head *io = &cur_trans->io_bgs; - int num_started = 0; - int loops = 0; - - spin_lock(&cur_trans->dirty_bgs_lock); - if (list_empty(&cur_trans->dirty_bgs)) { - spin_unlock(&cur_trans->dirty_bgs_lock); - return 0; - } - list_splice_init(&cur_trans->dirty_bgs, &dirty); - spin_unlock(&cur_trans->dirty_bgs_lock); - -again: - /* - * make sure all the block groups on our dirty list actually - * exist - */ - btrfs_create_pending_block_groups(trans); - - if (!path) { - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - } - - /* - * cache_write_mutex is here only to save us from balance or automatic - * removal of empty block groups deleting this block group while we are - * writing out the cache - */ - mutex_lock(&trans->transaction->cache_write_mutex); - while (!list_empty(&dirty)) { - bool drop_reserve = true; - - cache = list_first_entry(&dirty, - struct btrfs_block_group_cache, - dirty_list); - /* - * this can happen if something re-dirties a block - * group that is already under IO. Just wait for it to - * finish and then do it all again - */ - if (!list_empty(&cache->io_list)) { - list_del_init(&cache->io_list); - btrfs_wait_cache_io(trans, cache, path); - btrfs_put_block_group(cache); - } - - - /* - * btrfs_wait_cache_io uses the cache->dirty_list to decide - * if it should update the cache_state. Don't delete - * until after we wait. - * - * Since we're not running in the commit critical section - * we need the dirty_bgs_lock to protect from update_block_group - */ - spin_lock(&cur_trans->dirty_bgs_lock); - list_del_init(&cache->dirty_list); - spin_unlock(&cur_trans->dirty_bgs_lock); - - should_put = 1; - - cache_save_setup(cache, trans, path); - - if (cache->disk_cache_state == BTRFS_DC_SETUP) { - cache->io_ctl.inode = NULL; - ret = btrfs_write_out_cache(trans, cache, path); - if (ret == 0 && cache->io_ctl.inode) { - num_started++; - should_put = 0; - - /* - * The cache_write_mutex is protecting the - * io_list, also refer to the definition of - * btrfs_transaction::io_bgs for more details - */ - list_add_tail(&cache->io_list, io); - } else { - /* - * if we failed to write the cache, the - * generation will be bad and life goes on - */ - ret = 0; - } - } - if (!ret) { - ret = write_one_cache_group(trans, path, cache); - /* - * Our block group might still be attached to the list - * of new block groups in the transaction handle of some - * other task (struct btrfs_trans_handle->new_bgs). This - * means its block group item isn't yet in the extent - * tree. If this happens ignore the error, as we will - * try again later in the critical section of the - * transaction commit. 
- */ - if (ret == -ENOENT) { - ret = 0; - spin_lock(&cur_trans->dirty_bgs_lock); - if (list_empty(&cache->dirty_list)) { - list_add_tail(&cache->dirty_list, - &cur_trans->dirty_bgs); - btrfs_get_block_group(cache); - drop_reserve = false; - } - spin_unlock(&cur_trans->dirty_bgs_lock); - } else if (ret) { - btrfs_abort_transaction(trans, ret); - } - } - - /* if it's not on the io list, we need to put the block group */ - if (should_put) - btrfs_put_block_group(cache); - if (drop_reserve) - btrfs_delayed_refs_rsv_release(fs_info, 1); - - if (ret) - break; - - /* - * Avoid blocking other tasks for too long. It might even save - * us from writing caches for block groups that are going to be - * removed. - */ - mutex_unlock(&trans->transaction->cache_write_mutex); - mutex_lock(&trans->transaction->cache_write_mutex); - } - mutex_unlock(&trans->transaction->cache_write_mutex); - - /* - * go through delayed refs for all the stuff we've just kicked off - * and then loop back (just once) - */ - ret = btrfs_run_delayed_refs(trans, 0); - if (!ret && loops == 0) { - loops++; - spin_lock(&cur_trans->dirty_bgs_lock); - list_splice_init(&cur_trans->dirty_bgs, &dirty); - /* - * dirty_bgs_lock protects us from concurrent block group - * deletes too (not just cache_write_mutex). - */ - if (!list_empty(&dirty)) { - spin_unlock(&cur_trans->dirty_bgs_lock); - goto again; - } - spin_unlock(&cur_trans->dirty_bgs_lock); - } else if (ret < 0) { - btrfs_cleanup_dirty_bgs(cur_trans, fs_info); - } - - btrfs_free_path(path); - return ret; -} - -int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_block_group_cache *cache; - struct btrfs_transaction *cur_trans = trans->transaction; - int ret = 0; - int should_put; - struct btrfs_path *path; - struct list_head *io = &cur_trans->io_bgs; - int num_started = 0; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - /* - * Even though we are in the critical section of the transaction commit, - * we can still have concurrent tasks adding elements to this - * transaction's list of dirty block groups. These tasks correspond to - * endio free space workers started when writeback finishes for a - * space cache, which run inode.c:btrfs_finish_ordered_io(), and can - * allocate new block groups as a result of COWing nodes of the root - * tree when updating the free space inode. The writeback for the space - * caches is triggered by an earlier call to - * btrfs_start_dirty_block_groups() and iterations of the following - * loop. - * Also we want to do the cache_save_setup first and then run the - * delayed refs to make sure we have the best chance at doing this all - * in one shot. - */ - spin_lock(&cur_trans->dirty_bgs_lock); - while (!list_empty(&cur_trans->dirty_bgs)) { - cache = list_first_entry(&cur_trans->dirty_bgs, - struct btrfs_block_group_cache, - dirty_list); - - /* - * this can happen if cache_save_setup re-dirties a block - * group that is already under IO. 
Just wait for it to - * finish and then do it all again - */ - if (!list_empty(&cache->io_list)) { - spin_unlock(&cur_trans->dirty_bgs_lock); - list_del_init(&cache->io_list); - btrfs_wait_cache_io(trans, cache, path); - btrfs_put_block_group(cache); - spin_lock(&cur_trans->dirty_bgs_lock); - } - - /* - * don't remove from the dirty list until after we've waited - * on any pending IO - */ - list_del_init(&cache->dirty_list); - spin_unlock(&cur_trans->dirty_bgs_lock); - should_put = 1; - - cache_save_setup(cache, trans, path); - - if (!ret) - ret = btrfs_run_delayed_refs(trans, - (unsigned long) -1); - - if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { - cache->io_ctl.inode = NULL; - ret = btrfs_write_out_cache(trans, cache, path); - if (ret == 0 && cache->io_ctl.inode) { - num_started++; - should_put = 0; - list_add_tail(&cache->io_list, io); - } else { - /* - * if we failed to write the cache, the - * generation will be bad and life goes on - */ - ret = 0; - } - } - if (!ret) { - ret = write_one_cache_group(trans, path, cache); - /* - * One of the free space endio workers might have - * created a new block group while updating a free space - * cache's inode (at inode.c:btrfs_finish_ordered_io()) - * and hasn't released its transaction handle yet, in - * which case the new block group is still attached to - * its transaction handle and its creation has not - * finished yet (no block group item in the extent tree - * yet, etc). If this is the case, wait for all free - * space endio workers to finish and retry. This is a - * a very rare case so no need for a more efficient and - * complex approach. - */ - if (ret == -ENOENT) { - wait_event(cur_trans->writer_wait, - atomic_read(&cur_trans->num_writers) == 1); - ret = write_one_cache_group(trans, path, cache); - } - if (ret) - btrfs_abort_transaction(trans, ret); - } - - /* if its not on the io list, we need to put the block group */ - if (should_put) - btrfs_put_block_group(cache); - btrfs_delayed_refs_rsv_release(fs_info, 1); - spin_lock(&cur_trans->dirty_bgs_lock); - } - spin_unlock(&cur_trans->dirty_bgs_lock); - - /* - * Refer to the definition of io_bgs member for details why it's safe - * to use it without any locking - */ - while (!list_empty(io)) { - cache = list_first_entry(io, struct btrfs_block_group_cache, - io_list); - list_del_init(&cache->io_list); - btrfs_wait_cache_io(trans, cache, path); - btrfs_put_block_group(cache); - } - - btrfs_free_path(path); - return ret; -} - int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) { struct btrfs_block_group_cache *block_group; From ade4b5169f3f161e50412ad6279dc76219e05461 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:01 -0400 Subject: [PATCH 071/138] btrfs: export block group accounting helpers Want to move these functions into block-group.c, so export them. 
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 6 ++++++ fs/btrfs/extent-tree.c | 21 ++++++++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 749d34071f86..886bfa88ae06 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -192,6 +192,12 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans); int btrfs_setup_space_cache(struct btrfs_trans_handle *trans); +int btrfs_update_block_group(struct btrfs_trans_handle *trans, + u64 bytenr, u64 num_bytes, int alloc); +int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 ram_bytes, u64 num_bytes, int delalloc); +void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int delalloc); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0b78e5dcfe48..e49998dc9848 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2898,8 +2898,8 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, return ret; } -static int update_block_group(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, int alloc) +int btrfs_update_block_group(struct btrfs_trans_handle *trans, + u64 bytenr, u64 num_bytes, int alloc) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_block_group_cache *cache = NULL; @@ -3199,8 +3199,8 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg) * reservation and the block group has become read only we cannot make the * reservation and return -EAGAIN, otherwise this function always succeeds. */ -static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, - u64 ram_bytes, u64 num_bytes, int delalloc) +int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 ram_bytes, u64 num_bytes, int delalloc) { struct btrfs_space_info *space_info = cache->space_info; int ret = 0; @@ -3233,9 +3233,8 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, * A and before transaction A commits you free that leaf, you call this with * reserve set to 0 in order to clear the reservation. 
*/ - -static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, - u64 num_bytes, int delalloc) +void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int delalloc) { struct btrfs_space_info *space_info = cache->space_info; @@ -3705,7 +3704,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, goto out; } - ret = update_block_group(trans, bytenr, num_bytes, 0); + ret = btrfs_update_block_group(trans, bytenr, num_bytes, 0); if (ret) { btrfs_abort_transaction(trans, ret); goto out; @@ -4770,7 +4769,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, if (ret) return ret; - ret = update_block_group(trans, ins->objectid, ins->offset, 1); + ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, 1); if (ret) { /* -ENOENT, logic error */ btrfs_err(fs_info, "update block group failed for %llu %llu", ins->objectid, ins->offset); @@ -4860,8 +4859,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, if (ret) return ret; - ret = update_block_group(trans, extent_key.objectid, - fs_info->nodesize, 1); + ret = btrfs_update_block_group(trans, extent_key.objectid, + fs_info->nodesize, 1); if (ret) { /* -ENOENT, logic error */ btrfs_err(fs_info, "update block group failed for %llu %llu", extent_key.objectid, extent_key.offset); From 606d1bf10d7ebafdee26e8896b467b885c5233ec Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:02 -0400 Subject: [PATCH 072/138] btrfs: migrate the block group space accounting helpers We can now easily migrate this code as well. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 175 +++++++++++++++++++++++++++++++++++++++++ fs/btrfs/extent-tree.c | 173 ---------------------------------------- 2 files changed, 175 insertions(+), 173 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 763bab380379..eebef70725c6 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2519,3 +2519,178 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) btrfs_free_path(path); return ret; } + +int btrfs_update_block_group(struct btrfs_trans_handle *trans, + u64 bytenr, u64 num_bytes, int alloc) +{ + struct btrfs_fs_info *info = trans->fs_info; + struct btrfs_block_group_cache *cache = NULL; + u64 total = num_bytes; + u64 old_val; + u64 byte_in_group; + int factor; + int ret = 0; + + /* Block accounting for super block */ + spin_lock(&info->delalloc_root_lock); + old_val = btrfs_super_bytes_used(info->super_copy); + if (alloc) + old_val += num_bytes; + else + old_val -= num_bytes; + btrfs_set_super_bytes_used(info->super_copy, old_val); + spin_unlock(&info->delalloc_root_lock); + + while (total) { + cache = btrfs_lookup_block_group(info, bytenr); + if (!cache) { + ret = -ENOENT; + break; + } + factor = btrfs_bg_type_to_factor(cache->flags); + + /* + * If this block group has free space cache written out, we + * need to make sure to load it if we are removing space. This + * is because we need the unpinning stage to actually add the + * space back to the block group, otherwise we will leak space. 
+ */ + if (!alloc && cache->cached == BTRFS_CACHE_NO) + btrfs_cache_block_group(cache, 1); + + byte_in_group = bytenr - cache->key.objectid; + WARN_ON(byte_in_group > cache->key.offset); + + spin_lock(&cache->space_info->lock); + spin_lock(&cache->lock); + + if (btrfs_test_opt(info, SPACE_CACHE) && + cache->disk_cache_state < BTRFS_DC_CLEAR) + cache->disk_cache_state = BTRFS_DC_CLEAR; + + old_val = btrfs_block_group_used(&cache->item); + num_bytes = min(total, cache->key.offset - byte_in_group); + if (alloc) { + old_val += num_bytes; + btrfs_set_block_group_used(&cache->item, old_val); + cache->reserved -= num_bytes; + cache->space_info->bytes_reserved -= num_bytes; + cache->space_info->bytes_used += num_bytes; + cache->space_info->disk_used += num_bytes * factor; + spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); + } else { + old_val -= num_bytes; + btrfs_set_block_group_used(&cache->item, old_val); + cache->pinned += num_bytes; + btrfs_space_info_update_bytes_pinned(info, + cache->space_info, num_bytes); + cache->space_info->bytes_used -= num_bytes; + cache->space_info->disk_used -= num_bytes * factor; + spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); + + trace_btrfs_space_reservation(info, "pinned", + cache->space_info->flags, + num_bytes, 1); + percpu_counter_add_batch( + &cache->space_info->total_bytes_pinned, + num_bytes, + BTRFS_TOTAL_BYTES_PINNED_BATCH); + set_extent_dirty(info->pinned_extents, + bytenr, bytenr + num_bytes - 1, + GFP_NOFS | __GFP_NOFAIL); + } + + spin_lock(&trans->transaction->dirty_bgs_lock); + if (list_empty(&cache->dirty_list)) { + list_add_tail(&cache->dirty_list, + &trans->transaction->dirty_bgs); + trans->delayed_ref_updates++; + btrfs_get_block_group(cache); + } + spin_unlock(&trans->transaction->dirty_bgs_lock); + + /* + * No longer have used bytes in this block group, queue it for + * deletion. We do this after adding the block group to the + * dirty list to avoid races between cleaner kthread and space + * cache writeout. + */ + if (!alloc && old_val == 0) + btrfs_mark_bg_unused(cache); + + btrfs_put_block_group(cache); + total -= num_bytes; + bytenr += num_bytes; + } + + /* Modified block groups are accounted for in the delayed_refs_rsv. */ + btrfs_update_delayed_refs_rsv(trans); + return ret; +} + +/** + * btrfs_add_reserved_bytes - update the block_group and space info counters + * @cache: The cache we are manipulating + * @ram_bytes: The number of bytes of file content, and will be same to + * @num_bytes except for the compress path. + * @num_bytes: The number of bytes in question + * @delalloc: The blocks are allocated for the delalloc write + * + * This is called by the allocator when it reserves space. If this is a + * reservation and the block group has become read only we cannot make the + * reservation and return -EAGAIN, otherwise this function always succeeds. 
+ */ +int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 ram_bytes, u64 num_bytes, int delalloc) +{ + struct btrfs_space_info *space_info = cache->space_info; + int ret = 0; + + spin_lock(&space_info->lock); + spin_lock(&cache->lock); + if (cache->ro) { + ret = -EAGAIN; + } else { + cache->reserved += num_bytes; + space_info->bytes_reserved += num_bytes; + btrfs_space_info_update_bytes_may_use(cache->fs_info, + space_info, -ram_bytes); + if (delalloc) + cache->delalloc_bytes += num_bytes; + } + spin_unlock(&cache->lock); + spin_unlock(&space_info->lock); + return ret; +} + +/** + * btrfs_free_reserved_bytes - update the block_group and space info counters + * @cache: The cache we are manipulating + * @num_bytes: The number of bytes in question + * @delalloc: The blocks are allocated for the delalloc write + * + * This is called by somebody who is freeing space that was never actually used + * on disk. For example if you reserve some space for a new leaf in transaction + * A and before transaction A commits you free that leaf, you call this with + * reserve set to 0 in order to clear the reservation. + */ +void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int delalloc) +{ + struct btrfs_space_info *space_info = cache->space_info; + + spin_lock(&space_info->lock); + spin_lock(&cache->lock); + if (cache->ro) + space_info->bytes_readonly += num_bytes; + cache->reserved -= num_bytes; + space_info->bytes_reserved -= num_bytes; + space_info->max_extent_size = 0; + + if (delalloc) + cache->delalloc_bytes -= num_bytes; + spin_unlock(&cache->lock); + spin_unlock(&space_info->lock); +} diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e49998dc9848..03c0210840a2 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2898,115 +2898,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, return ret; } -int btrfs_update_block_group(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, int alloc) -{ - struct btrfs_fs_info *info = trans->fs_info; - struct btrfs_block_group_cache *cache = NULL; - u64 total = num_bytes; - u64 old_val; - u64 byte_in_group; - int factor; - int ret = 0; - - /* block accounting for super block */ - spin_lock(&info->delalloc_root_lock); - old_val = btrfs_super_bytes_used(info->super_copy); - if (alloc) - old_val += num_bytes; - else - old_val -= num_bytes; - btrfs_set_super_bytes_used(info->super_copy, old_val); - spin_unlock(&info->delalloc_root_lock); - - while (total) { - cache = btrfs_lookup_block_group(info, bytenr); - if (!cache) { - ret = -ENOENT; - break; - } - factor = btrfs_bg_type_to_factor(cache->flags); - - /* - * If this block group has free space cache written out, we - * need to make sure to load it if we are removing space. This - * is because we need the unpinning stage to actually add the - * space back to the block group, otherwise we will leak space. 
- */ - if (!alloc && cache->cached == BTRFS_CACHE_NO) - btrfs_cache_block_group(cache, 1); - - byte_in_group = bytenr - cache->key.objectid; - WARN_ON(byte_in_group > cache->key.offset); - - spin_lock(&cache->space_info->lock); - spin_lock(&cache->lock); - - if (btrfs_test_opt(info, SPACE_CACHE) && - cache->disk_cache_state < BTRFS_DC_CLEAR) - cache->disk_cache_state = BTRFS_DC_CLEAR; - - old_val = btrfs_block_group_used(&cache->item); - num_bytes = min(total, cache->key.offset - byte_in_group); - if (alloc) { - old_val += num_bytes; - btrfs_set_block_group_used(&cache->item, old_val); - cache->reserved -= num_bytes; - cache->space_info->bytes_reserved -= num_bytes; - cache->space_info->bytes_used += num_bytes; - cache->space_info->disk_used += num_bytes * factor; - spin_unlock(&cache->lock); - spin_unlock(&cache->space_info->lock); - } else { - old_val -= num_bytes; - btrfs_set_block_group_used(&cache->item, old_val); - cache->pinned += num_bytes; - btrfs_space_info_update_bytes_pinned(info, - cache->space_info, num_bytes); - cache->space_info->bytes_used -= num_bytes; - cache->space_info->disk_used -= num_bytes * factor; - spin_unlock(&cache->lock); - spin_unlock(&cache->space_info->lock); - - trace_btrfs_space_reservation(info, "pinned", - cache->space_info->flags, - num_bytes, 1); - percpu_counter_add_batch(&cache->space_info->total_bytes_pinned, - num_bytes, - BTRFS_TOTAL_BYTES_PINNED_BATCH); - set_extent_dirty(info->pinned_extents, - bytenr, bytenr + num_bytes - 1, - GFP_NOFS | __GFP_NOFAIL); - } - - spin_lock(&trans->transaction->dirty_bgs_lock); - if (list_empty(&cache->dirty_list)) { - list_add_tail(&cache->dirty_list, - &trans->transaction->dirty_bgs); - trans->delayed_ref_updates++; - btrfs_get_block_group(cache); - } - spin_unlock(&trans->transaction->dirty_bgs_lock); - - /* - * No longer have used bytes in this block group, queue it for - * deletion. We do this after adding the block group to the - * dirty list to avoid races between cleaner kthread and space - * cache writeout. - */ - if (!alloc && old_val == 0) - btrfs_mark_bg_unused(cache); - - btrfs_put_block_group(cache); - total -= num_bytes; - bytenr += num_bytes; - } - - /* Modified block groups are accounted for in the delayed_refs_rsv. */ - btrfs_update_delayed_refs_rsv(trans); - return ret; -} - static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) { struct btrfs_block_group_cache *cache; @@ -3187,70 +3078,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg) atomic_inc(&bg->reservations); } -/** - * btrfs_add_reserved_bytes - update the block_group and space info counters - * @cache: The cache we are manipulating - * @ram_bytes: The number of bytes of file content, and will be same to - * @num_bytes except for the compress path. - * @num_bytes: The number of bytes in question - * @delalloc: The blocks are allocated for the delalloc write - * - * This is called by the allocator when it reserves space. If this is a - * reservation and the block group has become read only we cannot make the - * reservation and return -EAGAIN, otherwise this function always succeeds. 
- */ -int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, - u64 ram_bytes, u64 num_bytes, int delalloc) -{ - struct btrfs_space_info *space_info = cache->space_info; - int ret = 0; - - spin_lock(&space_info->lock); - spin_lock(&cache->lock); - if (cache->ro) { - ret = -EAGAIN; - } else { - cache->reserved += num_bytes; - space_info->bytes_reserved += num_bytes; - btrfs_space_info_update_bytes_may_use(cache->fs_info, - space_info, -ram_bytes); - if (delalloc) - cache->delalloc_bytes += num_bytes; - } - spin_unlock(&cache->lock); - spin_unlock(&space_info->lock); - return ret; -} - -/** - * btrfs_free_reserved_bytes - update the block_group and space info counters - * @cache: The cache we are manipulating - * @num_bytes: The number of bytes in question - * @delalloc: The blocks are allocated for the delalloc write - * - * This is called by somebody who is freeing space that was never actually used - * on disk. For example if you reserve some space for a new leaf in transaction - * A and before transaction A commits you free that leaf, you call this with - * reserve set to 0 in order to clear the reservation. - */ -void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, - u64 num_bytes, int delalloc) -{ - struct btrfs_space_info *space_info = cache->space_info; - - spin_lock(&space_info->lock); - spin_lock(&cache->lock); - if (cache->ro) - space_info->bytes_readonly += num_bytes; - cache->reserved -= num_bytes; - space_info->bytes_reserved -= num_bytes; - space_info->max_extent_size = 0; - - if (delalloc) - cache->delalloc_bytes -= num_bytes; - spin_unlock(&cache->lock); - spin_unlock(&space_info->lock); -} void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info) { struct btrfs_caching_control *next; From 07730d87ac7872b54efa02da5d20b42fd6bb165a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:04 -0400 Subject: [PATCH 073/138] btrfs: migrate the chunk allocation code This feels more at home in block-group.c than in extent-tree.c. Signed-off-by: Josef Bacik Reviewed-by: David Sterba i [ refresh ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 246 ++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 21 ++++ fs/btrfs/ctree.h | 24 ---- fs/btrfs/delalloc-space.c | 1 + fs/btrfs/extent-tree.c | 244 ------------------------------------- 5 files changed, 268 insertions(+), 268 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index eebef70725c6..8f702cf4c0db 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -13,6 +13,7 @@ #include "sysfs.h" #include "tree-log.h" #include "delalloc-space.h" +#include "math.h" void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { @@ -2694,3 +2695,248 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, spin_unlock(&cache->lock); spin_unlock(&space_info->lock); } + +static void force_metadata_allocation(struct btrfs_fs_info *info) +{ + struct list_head *head = &info->space_info; + struct btrfs_space_info *found; + + rcu_read_lock(); + list_for_each_entry_rcu(found, head, list) { + if (found->flags & BTRFS_BLOCK_GROUP_METADATA) + found->force_alloc = CHUNK_ALLOC_FORCE; + } + rcu_read_unlock(); +} + +static int should_alloc_chunk(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *sinfo, int force) +{ + u64 bytes_used = btrfs_space_info_used(sinfo, false); + u64 thresh; + + if (force == CHUNK_ALLOC_FORCE) + return 1; + + /* + * in limited mode, we want to have some free space up to + * about 1% of the FS size. 
+ */ + if (force == CHUNK_ALLOC_LIMITED) { + thresh = btrfs_super_total_bytes(fs_info->super_copy); + thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); + + if (sinfo->total_bytes - bytes_used < thresh) + return 1; + } + + if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) + return 0; + return 1; +} + +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) +{ + u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); + + return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); +} + +/* + * If force is CHUNK_ALLOC_FORCE: + * - return 1 if it successfully allocates a chunk, + * - return errors including -ENOSPC otherwise. + * If force is NOT CHUNK_ALLOC_FORCE: + * - return 0 if it doesn't need to allocate a new chunk, + * - return 1 if it successfully allocates a chunk, + * - return errors including -ENOSPC otherwise. + */ +int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, + enum btrfs_chunk_alloc_enum force) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_space_info *space_info; + bool wait_for_alloc = false; + bool should_alloc = false; + int ret = 0; + + /* Don't re-enter if we're already allocating a chunk */ + if (trans->allocating_chunk) + return -ENOSPC; + + space_info = btrfs_find_space_info(fs_info, flags); + ASSERT(space_info); + + do { + spin_lock(&space_info->lock); + if (force < space_info->force_alloc) + force = space_info->force_alloc; + should_alloc = should_alloc_chunk(fs_info, space_info, force); + if (space_info->full) { + /* No more free physical space */ + if (should_alloc) + ret = -ENOSPC; + else + ret = 0; + spin_unlock(&space_info->lock); + return ret; + } else if (!should_alloc) { + spin_unlock(&space_info->lock); + return 0; + } else if (space_info->chunk_alloc) { + /* + * Someone is already allocating, so we need to block + * until this someone is finished and then loop to + * recheck if we should continue with our allocation + * attempt. + */ + wait_for_alloc = true; + spin_unlock(&space_info->lock); + mutex_lock(&fs_info->chunk_mutex); + mutex_unlock(&fs_info->chunk_mutex); + } else { + /* Proceed with allocation */ + space_info->chunk_alloc = 1; + wait_for_alloc = false; + spin_unlock(&space_info->lock); + } + + cond_resched(); + } while (wait_for_alloc); + + mutex_lock(&fs_info->chunk_mutex); + trans->allocating_chunk = true; + + /* + * If we have mixed data/metadata chunks we want to make sure we keep + * allocating mixed chunks instead of individual chunks. + */ + if (btrfs_mixed_space_info(space_info)) + flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); + + /* + * if we're doing a data chunk, go ahead and make sure that + * we keep a reasonable number of metadata chunks allocated in the + * FS as well. + */ + if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { + fs_info->data_chunk_allocations++; + if (!(fs_info->data_chunk_allocations % + fs_info->metadata_ratio)) + force_metadata_allocation(fs_info); + } + + /* + * Check if we have enough space in SYSTEM chunk because we may need + * to update devices. 
+ */ + check_system_chunk(trans, flags); + + ret = btrfs_alloc_chunk(trans, flags); + trans->allocating_chunk = false; + + spin_lock(&space_info->lock); + if (ret < 0) { + if (ret == -ENOSPC) + space_info->full = 1; + else + goto out; + } else { + ret = 1; + space_info->max_extent_size = 0; + } + + space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; +out: + space_info->chunk_alloc = 0; + spin_unlock(&space_info->lock); + mutex_unlock(&fs_info->chunk_mutex); + /* + * When we allocate a new chunk we reserve space in the chunk block + * reserve to make sure we can COW nodes/leafs in the chunk tree or + * add new nodes/leafs to it if we end up needing to do it when + * inserting the chunk item and updating device items as part of the + * second phase of chunk allocation, performed by + * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a + * large number of new block groups to create in our transaction + * handle's new_bgs list to avoid exhausting the chunk block reserve + * in extreme cases - like having a single transaction create many new + * block groups when starting to write out the free space caches of all + * the block groups that were made dirty during the lifetime of the + * transaction. + */ + if (trans->chunk_bytes_reserved >= (u64)SZ_2M) + btrfs_create_pending_block_groups(trans); + + return ret; +} + +static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) +{ + u64 num_dev; + + num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; + if (!num_dev) + num_dev = fs_info->fs_devices->rw_devices; + + return num_dev; +} + +/* + * If @is_allocation is true, reserve space in the system space info necessary + * for allocating a chunk, otherwise if it's false, reserve space necessary for + * removing a chunk. + */ +void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) +{ + struct btrfs_fs_info *fs_info = trans->fs_info; + struct btrfs_space_info *info; + u64 left; + u64 thresh; + int ret = 0; + u64 num_devs; + + /* + * Needed because we can end up allocating a system chunk and for an + * atomic and race free space reservation in the chunk block reserve. + */ + lockdep_assert_held(&fs_info->chunk_mutex); + + info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); + spin_lock(&info->lock); + left = info->total_bytes - btrfs_space_info_used(info, true); + spin_unlock(&info->lock); + + num_devs = get_profile_num_devs(fs_info, type); + + /* num_devs device items to update and 1 chunk item to add or remove */ + thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) + + btrfs_calc_trans_metadata_size(fs_info, 1); + + if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { + btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", + left, thresh, type); + btrfs_dump_space_info(fs_info, info, 0, 0); + } + + if (left < thresh) { + u64 flags = btrfs_system_alloc_profile(fs_info); + + /* + * Ignore failure to create system chunk. We might end up not + * needing it, as we might not need to COW all nodes/leafs from + * the paths we visit in the chunk tree (they were already COWed + * or created in the current transaction for example). 
+ */ + ret = btrfs_alloc_chunk(trans, flags); + } + + if (!ret) { + ret = btrfs_block_rsv_add(fs_info->chunk_root, + &fs_info->chunk_block_rsv, + thresh, BTRFS_RESERVE_NO_FLUSH); + if (!ret) + trans->chunk_bytes_reserved += thresh; + } +} + diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 886bfa88ae06..de90f7311574 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -10,6 +10,23 @@ enum btrfs_disk_cache_state { BTRFS_DC_SETUP, }; +/* + * Control flags for do_chunk_alloc's force field CHUNK_ALLOC_NO_FORCE means to + * only allocate a chunk if we really need one. + * + * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few + * chunks already allocated. This is used as part of the clustering code to + * help make sure we have a good pool of storage to cluster in, without filling + * the FS with empty chunks + * + * CHUNK_ALLOC_FORCE means it must try to allocate one + */ +enum btrfs_chunk_alloc_enum { + CHUNK_ALLOC_NO_FORCE, + CHUNK_ALLOC_LIMITED, + CHUNK_ALLOC_FORCE, +}; + struct btrfs_caching_control { struct list_head list; struct mutex mutex; @@ -198,6 +215,10 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, u64 ram_bytes, u64 num_bytes, int delalloc); void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache, u64 num_bytes, int delalloc); +int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, + enum btrfs_chunk_alloc_enum force); +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type); +void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6b17573c2fe6..fe25b7211f2d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2556,28 +2556,6 @@ enum btrfs_flush_state { COMMIT_TRANS = 9, }; -/* - * control flags for do_chunk_alloc's force field - * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk - * if we really need one. - * - * CHUNK_ALLOC_LIMITED means to only try and allocate one - * if we have very few chunks already allocated. 
This is - * used as part of the clustering code to help make sure - * we have a good pool of storage to cluster in, without - * filling the FS with empty chunks - * - * CHUNK_ALLOC_FORCE means it must try to allocate one - * - */ -enum btrfs_chunk_alloc_enum { - CHUNK_ALLOC_NO_FORCE, - CHUNK_ALLOC_LIMITED, - CHUNK_ALLOC_FORCE, -}; - -int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, - enum btrfs_chunk_alloc_enum force); int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, struct btrfs_block_rsv *rsv, int nitems, bool use_global_rsv); @@ -2593,7 +2571,6 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end); int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, u64 *actual_bytes); -int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type); int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range); int btrfs_init_space_info(struct btrfs_fs_info *fs_info); @@ -2602,7 +2579,6 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, int btrfs_start_write_no_snapshotting(struct btrfs_root *root); void btrfs_end_write_no_snapshotting(struct btrfs_root *root); void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); -void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index 17f7c0d38768..d2dfc201b2e1 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -7,6 +7,7 @@ #include "space-info.h" #include "transaction.h" #include "qgroup.h" +#include "block-group.h" int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes) { diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 03c0210840a2..9dd8b08e4615 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2661,243 +2661,6 @@ u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); } -static void force_metadata_allocation(struct btrfs_fs_info *info) -{ - struct list_head *head = &info->space_info; - struct btrfs_space_info *found; - - rcu_read_lock(); - list_for_each_entry_rcu(found, head, list) { - if (found->flags & BTRFS_BLOCK_GROUP_METADATA) - found->force_alloc = CHUNK_ALLOC_FORCE; - } - rcu_read_unlock(); -} - -static int should_alloc_chunk(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *sinfo, int force) -{ - u64 bytes_used = btrfs_space_info_used(sinfo, false); - u64 thresh; - - if (force == CHUNK_ALLOC_FORCE) - return 1; - - /* - * in limited mode, we want to have some free space up to - * about 1% of the FS size. - */ - if (force == CHUNK_ALLOC_LIMITED) { - thresh = btrfs_super_total_bytes(fs_info->super_copy); - thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); - - if (sinfo->total_bytes - bytes_used < thresh) - return 1; - } - - if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) - return 0; - return 1; -} - -static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) -{ - u64 num_dev; - - num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; - if (!num_dev) - num_dev = fs_info->fs_devices->rw_devices; - - return num_dev; -} - -/* - * If @is_allocation is true, reserve space in the system space info necessary - * for allocating a chunk, otherwise if it's false, reserve space necessary for - * removing a chunk. 
- */ -void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_space_info *info; - u64 left; - u64 thresh; - int ret = 0; - u64 num_devs; - - /* - * Needed because we can end up allocating a system chunk and for an - * atomic and race free space reservation in the chunk block reserve. - */ - lockdep_assert_held(&fs_info->chunk_mutex); - - info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); - spin_lock(&info->lock); - left = info->total_bytes - btrfs_space_info_used(info, true); - spin_unlock(&info->lock); - - num_devs = get_profile_num_devs(fs_info, type); - - /* num_devs device items to update and 1 chunk item to add or remove */ - thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) + - btrfs_calc_trans_metadata_size(fs_info, 1); - - if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { - btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", - left, thresh, type); - btrfs_dump_space_info(fs_info, info, 0, 0); - } - - if (left < thresh) { - u64 flags = btrfs_system_alloc_profile(fs_info); - - /* - * Ignore failure to create system chunk. We might end up not - * needing it, as we might not need to COW all nodes/leafs from - * the paths we visit in the chunk tree (they were already COWed - * or created in the current transaction for example). - */ - ret = btrfs_alloc_chunk(trans, flags); - } - - if (!ret) { - ret = btrfs_block_rsv_add(fs_info->chunk_root, - &fs_info->chunk_block_rsv, - thresh, BTRFS_RESERVE_NO_FLUSH); - if (!ret) - trans->chunk_bytes_reserved += thresh; - } -} - -/* - * If force is CHUNK_ALLOC_FORCE: - * - return 1 if it successfully allocates a chunk, - * - return errors including -ENOSPC otherwise. - * If force is NOT CHUNK_ALLOC_FORCE: - * - return 0 if it doesn't need to allocate a new chunk, - * - return 1 if it successfully allocates a chunk, - * - return errors including -ENOSPC otherwise. - */ -int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, - enum btrfs_chunk_alloc_enum force) -{ - struct btrfs_fs_info *fs_info = trans->fs_info; - struct btrfs_space_info *space_info; - bool wait_for_alloc = false; - bool should_alloc = false; - int ret = 0; - - /* Don't re-enter if we're already allocating a chunk */ - if (trans->allocating_chunk) - return -ENOSPC; - - space_info = btrfs_find_space_info(fs_info, flags); - ASSERT(space_info); - - do { - spin_lock(&space_info->lock); - if (force < space_info->force_alloc) - force = space_info->force_alloc; - should_alloc = should_alloc_chunk(fs_info, space_info, force); - if (space_info->full) { - /* No more free physical space */ - if (should_alloc) - ret = -ENOSPC; - else - ret = 0; - spin_unlock(&space_info->lock); - return ret; - } else if (!should_alloc) { - spin_unlock(&space_info->lock); - return 0; - } else if (space_info->chunk_alloc) { - /* - * Someone is already allocating, so we need to block - * until this someone is finished and then loop to - * recheck if we should continue with our allocation - * attempt. 
- */ - wait_for_alloc = true; - spin_unlock(&space_info->lock); - mutex_lock(&fs_info->chunk_mutex); - mutex_unlock(&fs_info->chunk_mutex); - } else { - /* Proceed with allocation */ - space_info->chunk_alloc = 1; - wait_for_alloc = false; - spin_unlock(&space_info->lock); - } - - cond_resched(); - } while (wait_for_alloc); - - mutex_lock(&fs_info->chunk_mutex); - trans->allocating_chunk = true; - - /* - * If we have mixed data/metadata chunks we want to make sure we keep - * allocating mixed chunks instead of individual chunks. - */ - if (btrfs_mixed_space_info(space_info)) - flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); - - /* - * if we're doing a data chunk, go ahead and make sure that - * we keep a reasonable number of metadata chunks allocated in the - * FS as well. - */ - if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { - fs_info->data_chunk_allocations++; - if (!(fs_info->data_chunk_allocations % - fs_info->metadata_ratio)) - force_metadata_allocation(fs_info); - } - - /* - * Check if we have enough space in SYSTEM chunk because we may need - * to update devices. - */ - check_system_chunk(trans, flags); - - ret = btrfs_alloc_chunk(trans, flags); - trans->allocating_chunk = false; - - spin_lock(&space_info->lock); - if (ret < 0) { - if (ret == -ENOSPC) - space_info->full = 1; - else - goto out; - } else { - ret = 1; - space_info->max_extent_size = 0; - } - - space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; -out: - space_info->chunk_alloc = 0; - spin_unlock(&space_info->lock); - mutex_unlock(&fs_info->chunk_mutex); - /* - * When we allocate a new chunk we reserve space in the chunk block - * reserve to make sure we can COW nodes/leafs in the chunk tree or - * add new nodes/leafs to it if we end up needing to do it when - * inserting the chunk item and updating device items as part of the - * second phase of chunk allocation, performed by - * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a - * large number of new block groups to create in our transaction - * handle's new_bgs list to avoid exhausting the chunk block reserve - * in extreme cases - like having a single transaction create many new - * block groups when starting to write out the free space caches of all - * the block groups that were made dirty during the lifetime of the - * transaction. - */ - if (trans->chunk_bytes_reserved >= (u64)SZ_2M) - btrfs_create_pending_block_groups(trans); - - return ret; -} - static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) { struct btrfs_block_group_cache *cache; @@ -5837,13 +5600,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, return ret; } -int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) -{ - u64 alloc_flags = get_alloc_profile(trans->fs_info, type); - - return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); -} - /* * helper to account the unused space of all the readonly block group in the * space_info. takes mirrors into account. From 878d7b679491665997122d6599001538c639cca9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:05 -0400 Subject: [PATCH 074/138] btrfs: migrate the alloc_profile helpers These feel more at home in block-group.c. 
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ refresh, adjust btrfs_get_alloc_profile exports ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 105 +++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 16 ++++++ fs/btrfs/ctree.h | 4 -- fs/btrfs/extent-tree.c | 122 +---------------------------------------- 4 files changed, 122 insertions(+), 125 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 8f702cf4c0db..2608f91a00ef 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -15,6 +15,111 @@ #include "delalloc-space.h" #include "math.h" +/* + * Return target flags in extended format or 0 if restripe for this chunk_type + * is not in progress + * + * Should be called with balance_lock held + */ +u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) +{ + struct btrfs_balance_control *bctl = fs_info->balance_ctl; + u64 target = 0; + + if (!bctl) + return 0; + + if (flags & BTRFS_BLOCK_GROUP_DATA && + bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { + target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; + } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && + bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { + target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; + } else if (flags & BTRFS_BLOCK_GROUP_METADATA && + bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { + target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; + } + + return target; +} + +/* + * @flags: available profiles in extended format (see ctree.h) + * + * Return reduced profile in chunk format. If profile changing is in progress + * (either running or paused) picks the target profile (if it's already + * available), otherwise falls back to plain reducing. + */ +static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) +{ + u64 num_devices = fs_info->fs_devices->rw_devices; + u64 target; + u64 raid_type; + u64 allowed = 0; + + /* + * See if restripe for this chunk_type is in progress, if so try to + * reduce to the target profile + */ + spin_lock(&fs_info->balance_lock); + target = btrfs_get_restripe_target(fs_info, flags); + if (target) { + /* Pick target profile only if it's already available */ + if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { + spin_unlock(&fs_info->balance_lock); + return extended_to_chunk(target); + } + } + spin_unlock(&fs_info->balance_lock); + + /* First, mask out the RAID levels which aren't possible */ + for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { + if (num_devices >= btrfs_raid_array[raid_type].devs_min) + allowed |= btrfs_raid_array[raid_type].bg_flag; + } + allowed &= flags; + + if (allowed & BTRFS_BLOCK_GROUP_RAID6) + allowed = BTRFS_BLOCK_GROUP_RAID6; + else if (allowed & BTRFS_BLOCK_GROUP_RAID5) + allowed = BTRFS_BLOCK_GROUP_RAID5; + else if (allowed & BTRFS_BLOCK_GROUP_RAID10) + allowed = BTRFS_BLOCK_GROUP_RAID10; + else if (allowed & BTRFS_BLOCK_GROUP_RAID1) + allowed = BTRFS_BLOCK_GROUP_RAID1; + else if (allowed & BTRFS_BLOCK_GROUP_RAID0) + allowed = BTRFS_BLOCK_GROUP_RAID0; + + flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK; + + return extended_to_chunk(flags | allowed); +} + +static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) +{ + unsigned seq; + u64 flags; + + do { + flags = orig_flags; + seq = read_seqbegin(&fs_info->profiles_lock); + + if (flags & BTRFS_BLOCK_GROUP_DATA) + flags |= fs_info->avail_data_alloc_bits; + else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) + flags |= fs_info->avail_system_alloc_bits; + else if (flags & 
BTRFS_BLOCK_GROUP_METADATA) + flags |= fs_info->avail_metadata_alloc_bits; + } while (read_seqretry(&fs_info->profiles_lock, seq)); + + return btrfs_reduce_alloc_profile(fs_info, flags); +} + +u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) +{ + return get_alloc_profile(fs_info, orig_flags); +} + void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { atomic_inc(&cache->count); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index de90f7311574..34a0098eadfc 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -219,6 +219,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, enum btrfs_chunk_alloc_enum force); int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type); void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); +u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags); + +static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info) +{ + return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA); +} + +static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info) +{ + return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA); +} + +static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) +{ + return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); +} static inline int btrfs_block_group_cache_done( struct btrfs_block_group_cache *cache) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index fe25b7211f2d..e1ad681b9e1a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2527,10 +2527,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_free_block_groups(struct btrfs_fs_info *info); void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); -u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info); -u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info); -u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info); -u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); enum btrfs_reserve_flush_enum { diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9dd8b08e4615..402199248549 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2524,111 +2524,6 @@ int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr) return readonly; } -/* - * returns target flags in extended format or 0 if restripe for this - * chunk_type is not in progress - * - * should be called with balance_lock held - */ -u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) -{ - struct btrfs_balance_control *bctl = fs_info->balance_ctl; - u64 target = 0; - - if (!bctl) - return 0; - - if (flags & BTRFS_BLOCK_GROUP_DATA && - bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { - target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target; - } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM && - bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) { - target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target; - } else if (flags & BTRFS_BLOCK_GROUP_METADATA && - bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) { - target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target; - } - - return target; -} - -/* - * @flags: available profiles in extended format (see ctree.h) - * - * Returns reduced profile in chunk format. 
If profile changing is in - * progress (either running or paused) picks the target profile (if it's - * already available), otherwise falls back to plain reducing. - */ -static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) -{ - u64 num_devices = fs_info->fs_devices->rw_devices; - u64 target; - u64 raid_type; - u64 allowed = 0; - - /* - * see if restripe for this chunk_type is in progress, if so - * try to reduce to the target profile - */ - spin_lock(&fs_info->balance_lock); - target = btrfs_get_restripe_target(fs_info, flags); - if (target) { - /* pick target profile only if it's already available */ - if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { - spin_unlock(&fs_info->balance_lock); - return extended_to_chunk(target); - } - } - spin_unlock(&fs_info->balance_lock); - - /* First, mask out the RAID levels which aren't possible */ - for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { - if (num_devices >= btrfs_raid_array[raid_type].devs_min) - allowed |= btrfs_raid_array[raid_type].bg_flag; - } - allowed &= flags; - - if (allowed & BTRFS_BLOCK_GROUP_RAID6) - allowed = BTRFS_BLOCK_GROUP_RAID6; - else if (allowed & BTRFS_BLOCK_GROUP_RAID5) - allowed = BTRFS_BLOCK_GROUP_RAID5; - else if (allowed & BTRFS_BLOCK_GROUP_RAID10) - allowed = BTRFS_BLOCK_GROUP_RAID10; - else if (allowed & BTRFS_BLOCK_GROUP_RAID1) - allowed = BTRFS_BLOCK_GROUP_RAID1; - else if (allowed & BTRFS_BLOCK_GROUP_RAID0) - allowed = BTRFS_BLOCK_GROUP_RAID0; - - flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK; - - return extended_to_chunk(flags | allowed); -} - -static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) -{ - unsigned seq; - u64 flags; - - do { - flags = orig_flags; - seq = read_seqbegin(&fs_info->profiles_lock); - - if (flags & BTRFS_BLOCK_GROUP_DATA) - flags |= fs_info->avail_data_alloc_bits; - else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) - flags |= fs_info->avail_system_alloc_bits; - else if (flags & BTRFS_BLOCK_GROUP_METADATA) - flags |= fs_info->avail_metadata_alloc_bits; - } while (read_seqretry(&fs_info->profiles_lock, seq)); - - return btrfs_reduce_alloc_profile(fs_info, flags); -} - -u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags) -{ - return get_alloc_profile(fs_info, orig_flags); -} - static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -2642,25 +2537,10 @@ static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) else flags = BTRFS_BLOCK_GROUP_METADATA; - ret = get_alloc_profile(fs_info, flags); + ret = btrfs_get_alloc_profile(fs_info, flags); return ret; } -u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info) -{ - return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA); -} - -u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info) -{ - return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA); -} - -u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info) -{ - return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); -} - static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start) { struct btrfs_block_group_cache *cache; From 3e43c279e824889dacd5321505a88506e8c772e3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:06 -0400 Subject: [PATCH 075/138] btrfs: migrate the block group cleanup code This can now be easily migrated as well. 
Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ refresh on top of sysfs cleanups ] Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 127 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/block-group.h | 2 + fs/btrfs/ctree.h | 2 - fs/btrfs/extent-tree.c | 128 ----------------------------------------- 4 files changed, 129 insertions(+), 130 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 2608f91a00ef..9c9f5a1bcdcb 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -3045,3 +3045,130 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) } } +void btrfs_put_block_group_cache(struct btrfs_fs_info *info) +{ + struct btrfs_block_group_cache *block_group; + u64 last = 0; + + while (1) { + struct inode *inode; + + block_group = btrfs_lookup_first_block_group(info, last); + while (block_group) { + btrfs_wait_block_group_cache_done(block_group); + spin_lock(&block_group->lock); + if (block_group->iref) + break; + spin_unlock(&block_group->lock); + block_group = btrfs_next_block_group(block_group); + } + if (!block_group) { + if (last == 0) + break; + last = 0; + continue; + } + + inode = block_group->inode; + block_group->iref = 0; + block_group->inode = NULL; + spin_unlock(&block_group->lock); + ASSERT(block_group->io_ctl.inode == NULL); + iput(inode); + last = block_group->key.objectid + block_group->key.offset; + btrfs_put_block_group(block_group); + } +} + +/* + * Must be called only after stopping all workers, since we could have block + * group caching kthreads running, and therefore they could race with us if we + * freed the block groups before stopping them. + */ +int btrfs_free_block_groups(struct btrfs_fs_info *info) +{ + struct btrfs_block_group_cache *block_group; + struct btrfs_space_info *space_info; + struct btrfs_caching_control *caching_ctl; + struct rb_node *n; + + down_write(&info->commit_root_sem); + while (!list_empty(&info->caching_block_groups)) { + caching_ctl = list_entry(info->caching_block_groups.next, + struct btrfs_caching_control, list); + list_del(&caching_ctl->list); + btrfs_put_caching_control(caching_ctl); + } + up_write(&info->commit_root_sem); + + spin_lock(&info->unused_bgs_lock); + while (!list_empty(&info->unused_bgs)) { + block_group = list_first_entry(&info->unused_bgs, + struct btrfs_block_group_cache, + bg_list); + list_del_init(&block_group->bg_list); + btrfs_put_block_group(block_group); + } + spin_unlock(&info->unused_bgs_lock); + + spin_lock(&info->block_group_cache_lock); + while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { + block_group = rb_entry(n, struct btrfs_block_group_cache, + cache_node); + rb_erase(&block_group->cache_node, + &info->block_group_cache_tree); + RB_CLEAR_NODE(&block_group->cache_node); + spin_unlock(&info->block_group_cache_lock); + + down_write(&block_group->space_info->groups_sem); + list_del(&block_group->list); + up_write(&block_group->space_info->groups_sem); + + /* + * We haven't cached this block group, which means we could + * possibly have excluded extents on this block group. 
+ */ + if (block_group->cached == BTRFS_CACHE_NO || + block_group->cached == BTRFS_CACHE_ERROR) + btrfs_free_excluded_extents(block_group); + + btrfs_remove_free_space_cache(block_group); + ASSERT(block_group->cached != BTRFS_CACHE_STARTED); + ASSERT(list_empty(&block_group->dirty_list)); + ASSERT(list_empty(&block_group->io_list)); + ASSERT(list_empty(&block_group->bg_list)); + ASSERT(atomic_read(&block_group->count) == 1); + btrfs_put_block_group(block_group); + + spin_lock(&info->block_group_cache_lock); + } + spin_unlock(&info->block_group_cache_lock); + + /* + * Now that all the block groups are freed, go through and free all the + * space_info structs. This is only called during the final stages of + * unmount, and so we know nobody is using them. We call + * synchronize_rcu() once before we start, just to be on the safe side. + */ + synchronize_rcu(); + + btrfs_release_global_block_rsv(info); + + while (!list_empty(&info->space_info)) { + space_info = list_entry(info->space_info.next, + struct btrfs_space_info, + list); + + /* + * Do not hide this behind enospc_debug, this is actually + * important and indicates a real bug if this happens. + */ + if (WARN_ON(space_info->bytes_pinned > 0 || + space_info->bytes_reserved > 0 || + space_info->bytes_may_use > 0)) + btrfs_dump_space_info(info, space_info, 0, 0); + list_del(&space_info->list); + btrfs_sysfs_remove_space_info(space_info); + } + return 0; +} diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 34a0098eadfc..55e68a8d2c44 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -220,6 +220,8 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type); void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type); u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags); +void btrfs_put_block_group_cache(struct btrfs_fs_info *info); +int btrfs_free_block_groups(struct btrfs_fs_info *info); static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info) { diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e1ad681b9e1a..85b808e3ea42 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2524,7 +2524,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref); int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); -int btrfs_free_block_groups(struct btrfs_fs_info *info); void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); @@ -2561,7 +2560,6 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, bool qgroup_free); int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); -void btrfs_put_block_group_cache(struct btrfs_fs_info *info); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 402199248549..cd210550a349 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5515,134 +5515,6 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) return free_bytes; } -void btrfs_put_block_group_cache(struct btrfs_fs_info *info) -{ - struct btrfs_block_group_cache *block_group; - u64 last = 0; - - while (1) { - struct inode 
*inode; - - block_group = btrfs_lookup_first_block_group(info, last); - while (block_group) { - btrfs_wait_block_group_cache_done(block_group); - spin_lock(&block_group->lock); - if (block_group->iref) - break; - spin_unlock(&block_group->lock); - block_group = btrfs_next_block_group(block_group); - } - if (!block_group) { - if (last == 0) - break; - last = 0; - continue; - } - - inode = block_group->inode; - block_group->iref = 0; - block_group->inode = NULL; - spin_unlock(&block_group->lock); - ASSERT(block_group->io_ctl.inode == NULL); - iput(inode); - last = block_group->key.objectid + block_group->key.offset; - btrfs_put_block_group(block_group); - } -} - -/* - * Must be called only after stopping all workers, since we could have block - * group caching kthreads running, and therefore they could race with us if we - * freed the block groups before stopping them. - */ -int btrfs_free_block_groups(struct btrfs_fs_info *info) -{ - struct btrfs_block_group_cache *block_group; - struct btrfs_space_info *space_info; - struct btrfs_caching_control *caching_ctl; - struct rb_node *n; - - down_write(&info->commit_root_sem); - while (!list_empty(&info->caching_block_groups)) { - caching_ctl = list_entry(info->caching_block_groups.next, - struct btrfs_caching_control, list); - list_del(&caching_ctl->list); - btrfs_put_caching_control(caching_ctl); - } - up_write(&info->commit_root_sem); - - spin_lock(&info->unused_bgs_lock); - while (!list_empty(&info->unused_bgs)) { - block_group = list_first_entry(&info->unused_bgs, - struct btrfs_block_group_cache, - bg_list); - list_del_init(&block_group->bg_list); - btrfs_put_block_group(block_group); - } - spin_unlock(&info->unused_bgs_lock); - - spin_lock(&info->block_group_cache_lock); - while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { - block_group = rb_entry(n, struct btrfs_block_group_cache, - cache_node); - rb_erase(&block_group->cache_node, - &info->block_group_cache_tree); - RB_CLEAR_NODE(&block_group->cache_node); - spin_unlock(&info->block_group_cache_lock); - - down_write(&block_group->space_info->groups_sem); - list_del(&block_group->list); - up_write(&block_group->space_info->groups_sem); - - /* - * We haven't cached this block group, which means we could - * possibly have excluded extents on this block group. - */ - if (block_group->cached == BTRFS_CACHE_NO || - block_group->cached == BTRFS_CACHE_ERROR) - btrfs_free_excluded_extents(block_group); - - btrfs_remove_free_space_cache(block_group); - ASSERT(block_group->cached != BTRFS_CACHE_STARTED); - ASSERT(list_empty(&block_group->dirty_list)); - ASSERT(list_empty(&block_group->io_list)); - ASSERT(list_empty(&block_group->bg_list)); - ASSERT(atomic_read(&block_group->count) == 1); - btrfs_put_block_group(block_group); - - spin_lock(&info->block_group_cache_lock); - } - spin_unlock(&info->block_group_cache_lock); - - /* now that all the block groups are freed, go through and - * free all the space_info structs. This is only called during - * the final stages of unmount, and so we know nobody is - * using them. We call synchronize_rcu() once before we start, - * just to be on the safe side. - */ - synchronize_rcu(); - - btrfs_release_global_block_rsv(info); - - while (!list_empty(&info->space_info)) { - space_info = list_entry(info->space_info.next, - struct btrfs_space_info, - list); - - /* - * Do not hide this behind enospc_debug, this is actually - * important and indicates a real bug if this happens. 
- */ - if (WARN_ON(space_info->bytes_pinned > 0 || - space_info->bytes_reserved > 0 || - space_info->bytes_may_use > 0)) - btrfs_dump_space_info(info, space_info, 0, 0); - list_del(&space_info->list); - btrfs_sysfs_remove_space_info(space_info); - } - return 0; -} - int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end) { From e11c0406ad1bb602e1e880fa4ff37dadb716639d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 20 Jun 2019 15:38:07 -0400 Subject: [PATCH 076/138] btrfs: unexport the temporary exported functions These were renamed and exported to facilitate logical migration of different code chunks into block-group.c. Now that all the users are in one file go ahead and rename them back, move the code around, and make them static. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 28 ++++++++++++++-------------- fs/btrfs/block-group.h | 4 ---- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 9c9f5a1bcdcb..55d6d1c36b62 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -21,7 +21,7 @@ * * Should be called with balance_lock held */ -u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) +static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) { struct btrfs_balance_control *bctl = fs_info->balance_ctl; u64 target = 0; @@ -62,7 +62,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags) * reduce to the target profile */ spin_lock(&fs_info->balance_lock); - target = btrfs_get_restripe_target(fs_info, flags); + target = get_restripe_target(fs_info, flags); if (target) { /* Pick target profile only if it's already available */ if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) { @@ -424,7 +424,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache) } #ifdef CONFIG_BTRFS_DEBUG -void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group) +static void fragment_free_space(struct btrfs_block_group_cache *block_group) { struct btrfs_fs_info *fs_info = block_group->fs_info; u64 start = block_group->key.objectid; @@ -661,7 +661,7 @@ static noinline void caching_thread(struct btrfs_work *work) block_group->space_info->bytes_used += bytes_used >> 1; spin_unlock(&block_group->lock); spin_unlock(&block_group->space_info->lock); - btrfs_fragment_free_space(block_group); + fragment_free_space(block_group); } #endif @@ -768,7 +768,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache, cache->space_info->bytes_used += bytes_used >> 1; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - btrfs_fragment_free_space(cache); + fragment_free_space(cache); } #endif mutex_unlock(&caching_ctl->mutex); @@ -1180,7 +1180,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( * data in this block group. That check should be done by relocation routine, * not this function. */ -int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) +static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force) { struct btrfs_space_info *sinfo = cache->space_info; u64 num_bytes; @@ -1296,7 +1296,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) spin_unlock(&block_group->lock); /* We don't want to force the issue, only flip if it's ok. 
*/ - ret = __btrfs_inc_block_group_ro(block_group, 0); + ret = inc_block_group_ro(block_group, 0); up_write(&space_info->groups_sem); if (ret < 0) { ret = 0; @@ -1822,7 +1822,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) set_avail_alloc_bits(info, cache->flags); if (btrfs_chunk_readonly(info, cache->key.objectid)) { - __btrfs_inc_block_group_ro(cache, 1); + inc_block_group_ro(cache, 1); } else if (btrfs_block_group_used(&cache->item) == 0) { ASSERT(list_empty(&cache->bg_list)); btrfs_mark_bg_unused(cache); @@ -1843,11 +1843,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) list_for_each_entry(cache, &space_info->block_groups[BTRFS_RAID_RAID0], list) - __btrfs_inc_block_group_ro(cache, 1); + inc_block_group_ro(cache, 1); list_for_each_entry(cache, &space_info->block_groups[BTRFS_RAID_SINGLE], list) - __btrfs_inc_block_group_ro(cache, 1); + inc_block_group_ro(cache, 1); } btrfs_init_global_block_rsv(info); @@ -1936,7 +1936,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 new_bytes_used = size - bytes_used; bytes_used += new_bytes_used >> 1; - btrfs_fragment_free_space(cache); + fragment_free_space(cache); } #endif /* @@ -1982,7 +1982,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) * if restripe for this chunk_type is on pick target profile and * return, otherwise do the usual balance */ - stripped = btrfs_get_restripe_target(fs_info, flags); + stripped = get_restripe_target(fs_info, flags); if (stripped) return extended_to_chunk(stripped); @@ -2070,14 +2070,14 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache) goto out; } - ret = __btrfs_inc_block_group_ro(cache, 0); + ret = inc_block_group_ro(cache, 0); if (!ret) goto out; alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); if (ret < 0) goto out; - ret = __btrfs_inc_block_group_ro(cache, 0); + ret = inc_block_group_ro(cache, 0); out: if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { alloc_flags = update_block_group_flags(fs_info, cache->flags); diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 55e68a8d2c44..5c6e2fb23e35 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -166,7 +166,6 @@ static inline int btrfs_should_fragment_free_space( (btrfs_test_opt(fs_info, FRAGMENT_DATA) && block_group->flags & BTRFS_BLOCK_GROUP_DATA); } -void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group); #endif struct btrfs_block_group_cache *btrfs_lookup_first_block_group( @@ -246,7 +245,4 @@ static inline int btrfs_block_group_cache_done( cache->cached == BTRFS_CACHE_ERROR; } -int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force); -u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags); - #endif /* BTRFS_BLOCK_GROUP_H */ From 844245b4548499efad26e33e408a459b1fe3a346 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 1 Aug 2019 18:19:33 -0400 Subject: [PATCH 077/138] btrfs: add a flush step for delayed iputs Delayed iputs could very well free up enough space without needing to commit the transaction, so make this step it's own step. This will allow us to skip the step for evictions in a later patch. 
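For clarity, the effect on flush_space()'s dispatch can be sketched as below. This is a simplified excerpt of the change shown in the diff that follows, not new behaviour: running and waiting on delayed iputs becomes a state of its own, and committing the transaction is only attempted as the separate, later state.

    case RUN_DELAYED_IPUTS:
            /*
             * Running and then waiting on delayed iputs can release
             * pinned space by itself, so try that as its own step.
             */
            btrfs_run_delayed_iputs(fs_info);
            btrfs_wait_on_delayed_iputs(fs_info);
            break;
    case COMMIT_TRANS:
            /* Reached only as the final, separate step. */
            ret = may_commit_transaction(fs_info, space_info);
            break;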
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 3 ++- fs/btrfs/space-info.c | 5 +++-- include/trace/events/btrfs.h | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 85b808e3ea42..4ad4715a7941 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2548,7 +2548,8 @@ enum btrfs_flush_state { FLUSH_DELALLOC_WAIT = 6, ALLOC_CHUNK = 7, ALLOC_CHUNK_FORCE = 8, - COMMIT_TRANS = 9, + RUN_DELAYED_IPUTS = 9, + COMMIT_TRANS = 10, }; int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index e9406b2133d1..1f4e97070f33 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -664,7 +664,7 @@ static void flush_space(struct btrfs_fs_info *fs_info, if (ret > 0 || ret == -ENOSPC) ret = 0; break; - case COMMIT_TRANS: + case RUN_DELAYED_IPUTS: /* * If we have pending delayed iputs then we could free up a * bunch of pinned space, so make sure we run the iputs before @@ -672,7 +672,8 @@ static void flush_space(struct btrfs_fs_info *fs_info, */ btrfs_run_delayed_iputs(fs_info); btrfs_wait_on_delayed_iputs(fs_info); - + break; + case COMMIT_TRANS: ret = may_commit_transaction(fs_info, space_info); break; default: diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 5cb95646b94e..5df604de4f11 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1088,6 +1088,7 @@ TRACE_EVENT(btrfs_trigger_flush, { FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \ { ALLOC_CHUNK, "ALLOC_CHUNK"}, \ { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \ + { RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS"}, \ { COMMIT_TRANS, "COMMIT_TRANS"}) TRACE_EVENT(btrfs_flush_space, From 374bf9c5cd7d0b5c270cb954aaf18d794d4b088c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 1 Aug 2019 18:19:34 -0400 Subject: [PATCH 078/138] btrfs: unify error handling for ticket flushing Currently we handle the cleanup of errored out tickets in both the priority flush path and the normal flushing path. This is the same code in both places, so just refactor so we don't duplicate the cleanup work. 
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 32 +++++++++++--------------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 1f4e97070f33..bf2fde3fe782 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -878,20 +878,19 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, } while (flush_state < ARRAY_SIZE(priority_flush_states)); } -static int wait_reserve_ticket(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - struct reserve_ticket *ticket) +static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, + struct reserve_ticket *ticket) { DEFINE_WAIT(wait); - u64 reclaim_bytes = 0; int ret = 0; spin_lock(&space_info->lock); while (ticket->bytes > 0 && ticket->error == 0) { ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); if (ret) { - ret = -EINTR; + ticket->error = -EINTR; break; } spin_unlock(&space_info->lock); @@ -901,18 +900,7 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info, finish_wait(&ticket->wait, &wait); spin_lock(&space_info->lock); } - if (!ret) - ret = ticket->error; - if (!list_empty(&ticket->list)) - list_del_init(&ticket->list); - if (ticket->bytes && ticket->bytes < ticket->orig_bytes) - reclaim_bytes = ticket->orig_bytes - ticket->bytes; spin_unlock(&space_info->lock); - - if (reclaim_bytes) - btrfs_space_info_add_old_bytes(fs_info, space_info, - reclaim_bytes); - return ret; } /** @@ -1010,16 +998,18 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, return ret; if (flush == BTRFS_RESERVE_FLUSH_ALL) - return wait_reserve_ticket(fs_info, space_info, &ticket); + wait_reserve_ticket(fs_info, space_info, &ticket); + else + priority_reclaim_metadata_space(fs_info, space_info, &ticket); - ret = 0; - priority_reclaim_metadata_space(fs_info, space_info, &ticket); spin_lock(&space_info->lock); - if (ticket.bytes) { + ret = ticket.error; + if (ticket.bytes || ticket.error) { if (ticket.bytes < orig_bytes) reclaim_bytes = orig_bytes - ticket.bytes; list_del_init(&ticket.list); - ret = -ENOSPC; + if (!ret) + ret = -ENOSPC; } spin_unlock(&space_info->lock); From 03235279b4defc85e0e593824b27b5cf814b2fa0 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 1 Aug 2019 18:19:35 -0400 Subject: [PATCH 079/138] btrfs: factor out the ticket flush handling We're going to make this logic a little more complicated for evict, so factor the ticket flushing/waiting code out of __reserve_metadata_bytes. This has no functional change. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 64 ++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index bf2fde3fe782..8e00f53601ff 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -903,6 +903,47 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, spin_unlock(&space_info->lock); } +/** + * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket + * @fs_info - the fs + * @space_info - the space_info for the reservation + * @ticket - the ticket for the reservation + * @flush - how much we can flush + * + * This does the work of figuring out how to flush for the ticket, waiting for + * the reservation, and returning the appropriate error if there is one. 
+ */ +static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, + struct reserve_ticket *ticket, + enum btrfs_reserve_flush_enum flush) +{ + u64 reclaim_bytes = 0; + int ret; + + if (flush == BTRFS_RESERVE_FLUSH_ALL) + wait_reserve_ticket(fs_info, space_info, ticket); + else + priority_reclaim_metadata_space(fs_info, space_info, ticket); + + spin_lock(&space_info->lock); + ret = ticket->error; + if (ticket->bytes || ticket->error) { + if (ticket->bytes < ticket->orig_bytes) + reclaim_bytes = ticket->orig_bytes - ticket->bytes; + list_del_init(&ticket->list); + if (!ret) + ret = -ENOSPC; + } + spin_unlock(&space_info->lock); + + if (reclaim_bytes) + btrfs_space_info_add_old_bytes(fs_info, space_info, + reclaim_bytes); + ASSERT(list_empty(&ticket->list)); + return ret; +} + /** * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space * @root - the root we're allocating for @@ -925,7 +966,6 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, { struct reserve_ticket ticket; u64 used; - u64 reclaim_bytes = 0; int ret = 0; ASSERT(orig_bytes); @@ -997,27 +1037,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, if (!ret || flush == BTRFS_RESERVE_NO_FLUSH) return ret; - if (flush == BTRFS_RESERVE_FLUSH_ALL) - wait_reserve_ticket(fs_info, space_info, &ticket); - else - priority_reclaim_metadata_space(fs_info, space_info, &ticket); - - spin_lock(&space_info->lock); - ret = ticket.error; - if (ticket.bytes || ticket.error) { - if (ticket.bytes < orig_bytes) - reclaim_bytes = orig_bytes - ticket.bytes; - list_del_init(&ticket.list); - if (!ret) - ret = -ENOSPC; - } - spin_unlock(&space_info->lock); - - if (reclaim_bytes) - btrfs_space_info_add_old_bytes(fs_info, space_info, - reclaim_bytes); - ASSERT(list_empty(&ticket.list)); - return ret; + return handle_reserve_ticket(fs_info, space_info, &ticket, flush); } /** From 9ce2f423b9463f975720782b3838640da313ecb4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 1 Aug 2019 18:19:36 -0400 Subject: [PATCH 080/138] btrfs: refactor priority_reclaim_metadata_space With the eviction flushing stuff we'll want to allow for different states, but still work basically the same way that priority_reclaim_metadata_space works currently. Refactor this to take the flushing states and size as an argument so we can use the same logic for limit flushing and eviction flushing. 
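As a usage sketch (taken from the hunk below), the limit-flush caller now passes its state table and its size explicitly:

	priority_reclaim_metadata_space(fs_info, space_info, ticket,
					priority_flush_states,
					ARRAY_SIZE(priority_flush_states));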
Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 8e00f53601ff..37ec31199675 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -849,8 +849,10 @@ static const enum btrfs_flush_state priority_flush_states[] = { }; static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - struct reserve_ticket *ticket) + struct btrfs_space_info *space_info, + struct reserve_ticket *ticket, + const enum btrfs_flush_state *states, + int states_nr) { u64 to_reclaim; int flush_state; @@ -866,8 +868,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, flush_state = 0; do { - flush_space(fs_info, space_info, to_reclaim, - priority_flush_states[flush_state]); + flush_space(fs_info, space_info, to_reclaim, states[flush_state]); flush_state++; spin_lock(&space_info->lock); if (ticket->bytes == 0) { @@ -875,7 +876,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, return; } spin_unlock(&space_info->lock); - } while (flush_state < ARRAY_SIZE(priority_flush_states)); + } while (flush_state < states_nr); } static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, @@ -924,7 +925,9 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, if (flush == BTRFS_RESERVE_FLUSH_ALL) wait_reserve_ticket(fs_info, space_info, ticket); else - priority_reclaim_metadata_space(fs_info, space_info, ticket); + priority_reclaim_metadata_space(fs_info, space_info, ticket, + priority_flush_states, + ARRAY_SIZE(priority_flush_states)); spin_lock(&space_info->lock); ret = ticket->error; From d3984c90414a36af581b3b7c0daa87f9de3c0533 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 1 Aug 2019 18:19:37 -0400 Subject: [PATCH 081/138] btrfs: introduce an evict flushing state We have this weird space flushing loop inside inode.c for evict where we'll do the normal LIMIT flush, and then commit the transaction and hope we get our space. This is super janky, and in fact there's really nothing stopping us from using FLUSH_ALL except that we run delayed iputs, which means we could deadlock. So introduce a new flush state for eviction that does the normal priority flushing with all of the states that are safe for eviction. The nice side-effect of this is that we'll try harder for evictions. Previously if (for example generic/269) you had a bunch of other operations happening on the fs you could race with those reservations when committing the transaction, and eventually miss getting a reservation for the evict. With this code we'll have our ticket in place through the transaction commit, so any pinned bytes will go to our pending evictions first. 
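As a sketch of the idea (the authoritative table is in the hunk below), the eviction states reuse the priority flushing steps plus a transaction commit, while deliberately omitting RUN_DELAYED_IPUTS to avoid the deadlock described above:

	static const enum btrfs_flush_state evict_flush_states[] = {
		FLUSH_DELAYED_ITEMS_NR,
		FLUSH_DELAYED_ITEMS,
		FLUSH_DELAYED_REFS_NR,
		FLUSH_DELAYED_REFS,
		FLUSH_DELALLOC,
		FLUSH_DELALLOC_WAIT,
		ALLOC_CHUNK,
		COMMIT_TRANS,	/* no RUN_DELAYED_IPUTS here */
	};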
Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/inode.c | 83 +++++++++++++++++++------------------------ fs/btrfs/space-info.c | 27 ++++++++++++-- 3 files changed, 63 insertions(+), 48 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4ad4715a7941..b161224b5a0b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2536,6 +2536,7 @@ enum btrfs_reserve_flush_enum { * case, use FLUSH LIMIT */ BTRFS_RESERVE_FLUSH_LIMIT, + BTRFS_RESERVE_FLUSH_EVICT, BTRFS_RESERVE_FLUSH_ALL, }; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 612c25aac15c..c4116bc58827 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5336,59 +5336,50 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; + struct btrfs_trans_handle *trans; u64 delayed_refs_extra = btrfs_calc_trans_metadata_size(fs_info, 1); - int failures = 0; - - for (;;) { - struct btrfs_trans_handle *trans; - int ret; - - ret = btrfs_block_rsv_refill(root, rsv, - rsv->size + delayed_refs_extra, - BTRFS_RESERVE_FLUSH_LIMIT); - - if (ret && ++failures > 2) { - btrfs_warn(fs_info, - "could not allocate space for a delete; will truncate on mount"); - return ERR_PTR(-ENOSPC); - } - - /* - * Evict can generate a large amount of delayed refs without - * having a way to add space back since we exhaust our temporary - * block rsv. We aren't allowed to do FLUSH_ALL in this case - * because we could deadlock with so many things in the flushing - * code, so we have to try and hold some extra space to - * compensate for our delayed ref generation. If we can't get - * that space then we need see if we can steal our minimum from - * the global reserve. We will be ratelimited by the amount of - * space we have for the delayed refs rsv, so we'll end up - * committing and trying again. - */ - trans = btrfs_join_transaction(root); - if (IS_ERR(trans) || !ret) { - if (!IS_ERR(trans)) { - trans->block_rsv = &fs_info->trans_block_rsv; - trans->bytes_reserved = delayed_refs_extra; - btrfs_block_rsv_migrate(rsv, trans->block_rsv, - delayed_refs_extra, 1); - } - return trans; - } + int ret; + /* + * Eviction should be taking place at some place safe because of our + * delayed iputs. However the normal flushing code will run delayed + * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock. + * + * We reserve the delayed_refs_extra here again because we can't use + * btrfs_start_transaction(root, 0) for the same deadlocky reason as + * above. We reserve our extra bit here because we generate a ton of + * delayed refs activity by truncating. + * + * If we cannot make our reservation we'll attempt to steal from the + * global reserve, because we really want to be able to free up space. + */ + ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra, + BTRFS_RESERVE_FLUSH_EVICT); + if (ret) { /* * Try to steal from the global reserve if there is space for * it. */ - if (!btrfs_check_space_for_delayed_refs(fs_info) && - !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) - return trans; - - /* If not, commit and try again. 
*/ - ret = btrfs_commit_transaction(trans); - if (ret) - return ERR_PTR(ret); + if (btrfs_check_space_for_delayed_refs(fs_info) || + btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) { + btrfs_warn(fs_info, + "could not allocate space for delete; will truncate on mount"); + return ERR_PTR(-ENOSPC); + } + delayed_refs_extra = 0; } + + trans = btrfs_join_transaction(root); + if (IS_ERR(trans)) + return trans; + + if (delayed_refs_extra) { + trans->block_rsv = &fs_info->trans_block_rsv; + trans->bytes_reserved = delayed_refs_extra; + btrfs_block_rsv_migrate(rsv, trans->block_rsv, + delayed_refs_extra, 1); + } + return trans; } void btrfs_evict_inode(struct inode *inode) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 37ec31199675..5f8f65599de1 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -848,6 +848,17 @@ static const enum btrfs_flush_state priority_flush_states[] = { ALLOC_CHUNK, }; +static const enum btrfs_flush_state evict_flush_states[] = { + FLUSH_DELAYED_ITEMS_NR, + FLUSH_DELAYED_ITEMS, + FLUSH_DELAYED_REFS_NR, + FLUSH_DELAYED_REFS, + FLUSH_DELALLOC, + FLUSH_DELALLOC_WAIT, + ALLOC_CHUNK, + COMMIT_TRANS, +}; + static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, struct reserve_ticket *ticket, @@ -922,12 +933,24 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, u64 reclaim_bytes = 0; int ret; - if (flush == BTRFS_RESERVE_FLUSH_ALL) + switch (flush) { + case BTRFS_RESERVE_FLUSH_ALL: wait_reserve_ticket(fs_info, space_info, ticket); - else + break; + case BTRFS_RESERVE_FLUSH_LIMIT: priority_reclaim_metadata_space(fs_info, space_info, ticket, priority_flush_states, ARRAY_SIZE(priority_flush_states)); + break; + case BTRFS_RESERVE_FLUSH_EVICT: + priority_reclaim_metadata_space(fs_info, space_info, ticket, + evict_flush_states, + ARRAY_SIZE(evict_flush_states)); + break; + default: + ASSERT(0); + break; + } spin_lock(&space_info->lock); ret = ticket->error; From 27e022a9c6fe97dd80e31c038328d4f79b2080c2 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 8 Aug 2019 12:32:44 +0800 Subject: [PATCH 082/138] btrfs: replace: BTRFS_DEV_REPLACE_ITEM_STATE_x defines should go The BTRFS_DEV_REPLACE_ITEM_STATE_x defines, as shown in [1], are unused in both kernel and btrfs-progs (except for one instance of BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED in kernel). [1] btrfs.h:#define BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED 2 btrfs.h:#define BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED 3 btrfs.h:#define BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED 4 Further these define-values are different form its counterpart BTRFS_IOCTL_DEV_REPLACE_STATE_x series as shown in [2]. [2] btrfs_tree.h:#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 btrfs_tree.h:#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 btrfs_tree.h:#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 So this patch deletes the BTRFS_DEV_REPLACE_ITEM_STATE_x altogether, and one instance of BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED is replaced with BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED in the kernel. 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 2 +- include/uapi/linux/btrfs_tree.h | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 6b2e9aa83ffa..00ea828beb00 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -56,7 +56,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info) no_valid_dev_replace_entry_found: ret = 0; dev_replace->replace_state = - BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED; + BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED; dev_replace->cont_reading_from_srcdev_mode = BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS; dev_replace->time_started = 0; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 34d5b34286fa..71246c1941aa 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -806,11 +806,6 @@ struct btrfs_dev_stats_item { #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 -#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 -#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 -#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 struct btrfs_dev_replace_item { /* From efad8a853ad2057f96664328a0d327a05ce39c76 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 12 Aug 2019 19:14:29 +0100 Subject: [PATCH 083/138] Btrfs: fix use-after-free when using the tree modification log At ctree.c:get_old_root(), we are accessing a root's header owner field after we have freed the respective extent buffer. This results in an use-after-free that can lead to crashes, and when CONFIG_DEBUG_PAGEALLOC is set, results in a stack trace like the following: [ 3876.799331] stack segment: 0000 [#1] SMP DEBUG_PAGEALLOC PTI [ 3876.799363] CPU: 0 PID: 15436 Comm: pool Not tainted 5.3.0-rc3-btrfs-next-54 #1 [ 3876.799385] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-0-ga698c8995f-prebuilt.qemu.org 04/01/2014 [ 3876.799433] RIP: 0010:btrfs_search_old_slot+0x652/0xd80 [btrfs] (...) [ 3876.799502] RSP: 0018:ffff9f08c1a2f9f0 EFLAGS: 00010286 [ 3876.799518] RAX: ffff8dd300000000 RBX: ffff8dd85a7a9348 RCX: 000000038da26000 [ 3876.799538] RDX: 0000000000000000 RSI: ffffe522ce368980 RDI: 0000000000000246 [ 3876.799559] RBP: dae1922adadad000 R08: 0000000008020000 R09: ffffe522c0000000 [ 3876.799579] R10: ffff8dd57fd788c8 R11: 000000007511b030 R12: ffff8dd781ddc000 [ 3876.799599] R13: ffff8dd9e6240578 R14: ffff8dd6896f7a88 R15: ffff8dd688cf90b8 [ 3876.799620] FS: 00007f23ddd97700(0000) GS:ffff8dda20200000(0000) knlGS:0000000000000000 [ 3876.799643] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 3876.799660] CR2: 00007f23d4024000 CR3: 0000000710bb0005 CR4: 00000000003606f0 [ 3876.799682] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 3876.799703] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 3876.799723] Call Trace: [ 3876.799735] ? do_raw_spin_unlock+0x49/0xc0 [ 3876.799749] ? _raw_spin_unlock+0x24/0x30 [ 3876.799779] resolve_indirect_refs+0x1eb/0xc80 [btrfs] [ 3876.799810] find_parent_nodes+0x38d/0x1180 [btrfs] [ 3876.799841] btrfs_check_shared+0x11a/0x1d0 [btrfs] [ 3876.799870] ? 
extent_fiemap+0x598/0x6e0 [btrfs] [ 3876.799895] extent_fiemap+0x598/0x6e0 [btrfs] [ 3876.799913] do_vfs_ioctl+0x45a/0x700 [ 3876.799926] ksys_ioctl+0x70/0x80 [ 3876.799938] ? trace_hardirqs_off_thunk+0x1a/0x20 [ 3876.799953] __x64_sys_ioctl+0x16/0x20 [ 3876.799965] do_syscall_64+0x62/0x220 [ 3876.799977] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 3876.799993] RIP: 0033:0x7f23e0013dd7 (...) [ 3876.800056] RSP: 002b:00007f23ddd96ca8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 3876.800078] RAX: ffffffffffffffda RBX: 00007f23d80210f8 RCX: 00007f23e0013dd7 [ 3876.800099] RDX: 00007f23d80210f8 RSI: 00000000c020660b RDI: 0000000000000003 [ 3876.800626] RBP: 000055fa2a2a2440 R08: 0000000000000000 R09: 00007f23ddd96d7c [ 3876.801143] R10: 00007f23d8022000 R11: 0000000000000246 R12: 00007f23ddd96d80 [ 3876.801662] R13: 00007f23ddd96d78 R14: 00007f23d80210f0 R15: 00007f23ddd96d80 (...) [ 3876.805107] ---[ end trace e53161e179ef04f9 ]--- Fix that by saving the root's header owner field into a local variable before freeing the root's extent buffer, and then use that local variable when needed. Fixes: 30b0463a9394d9 ("Btrfs: fix accessing the root pointer in tree mod log functions") CC: stable@vger.kernel.org # 3.10+ Reviewed-by: Nikolay Borisov Reviewed-by: Anand Jain Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f8dc582db690..a2f3cd7a619c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1343,6 +1343,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) struct tree_mod_elem *tm; struct extent_buffer *eb = NULL; struct extent_buffer *eb_root; + u64 eb_root_owner = 0; struct extent_buffer *old; struct tree_mod_root *old_root = NULL; u64 old_generation = 0; @@ -1380,6 +1381,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) free_extent_buffer(old); } } else if (old_root) { + eb_root_owner = btrfs_header_owner(eb_root); btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); eb = alloc_dummy_extent_buffer(fs_info, logical); @@ -1396,7 +1398,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); - btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); + btrfs_set_header_owner(eb, eb_root_owner); btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } From e18333a7cb97d838c09802092fcecd3269363ecb Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 9 Aug 2019 16:25:34 +0200 Subject: [PATCH 084/138] btrfs: define compression levels statically The maximum and default levels do not change and can be defined directly. The set_level callback was a temporary solution and will be removed. Reviewed-by: Nikolay Borisov Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/compression.h | 4 ++++ fs/btrfs/lzo.c | 2 ++ fs/btrfs/zlib.c | 2 ++ fs/btrfs/zstd.c | 2 ++ 4 files changed, 10 insertions(+) diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 2035b8eb1290..cffd689adb6e 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -162,6 +162,10 @@ struct btrfs_compress_op { * if the level is out of bounds or the default if 0 is passed in. 
*/ unsigned int (*set_level)(unsigned int level); + + /* Maximum level supported by the compression algorithm */ + unsigned int max_level; + unsigned int default_level; }; /* The heuristic workspaces are managed via the 0th workspace manager */ diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 579d53ae256f..adac6cb30d65 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -523,4 +523,6 @@ const struct btrfs_compress_op btrfs_lzo_compress = { .decompress_bio = lzo_decompress_bio, .decompress = lzo_decompress, .set_level = lzo_set_level, + .max_level = 1, + .default_level = 1, }; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index b86b7ad6b900..03d6c3683bd9 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -437,4 +437,6 @@ const struct btrfs_compress_op btrfs_zlib_compress = { .decompress_bio = zlib_decompress_bio, .decompress = zlib_decompress, .set_level = zlib_set_level, + .max_level = 9, + .default_level = BTRFS_ZLIB_DEFAULT_LEVEL, }; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 3837ca180d52..b2b23a6a497d 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -729,4 +729,6 @@ const struct btrfs_compress_op btrfs_zstd_compress = { .decompress_bio = zstd_decompress_bio, .decompress = zstd_decompress, .set_level = zstd_set_level, + .max_level = ZSTD_BTRFS_MAX_LEVEL, + .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, }; From b0c1fe1eaf5eea10e8d577545298b6d5f9f7ff38 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 9 Aug 2019 16:49:06 +0200 Subject: [PATCH 085/138] btrfs: compression: replace set_level callbacks by a common helper The set_level callbacks do not do anything special and can be replaced by a helper that uses the levels defined in the tables. Reviewed-by: Nikolay Borisov Reviewed-by: Anand Jain Signed-off-by: David Sterba --- fs/btrfs/compression.c | 20 ++++++++++++++++++-- fs/btrfs/compression.h | 9 ++------- fs/btrfs/lzo.c | 6 ------ fs/btrfs/zlib.c | 9 --------- fs/btrfs/zstd.c | 9 --------- 5 files changed, 20 insertions(+), 33 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 60c47b417a4b..fe7a8b5ff96c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1039,7 +1039,7 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, struct list_head *workspace; int ret; - level = btrfs_compress_op[type]->set_level(level); + level = btrfs_compress_set_level(type, level); workspace = get_workspace(type, level); ret = btrfs_compress_op[type]->compress_pages(workspace, mapping, start, pages, @@ -1611,7 +1611,23 @@ unsigned int btrfs_compress_str2level(unsigned int type, const char *str) level = 0; } - level = btrfs_compress_op[type]->set_level(level); + level = btrfs_compress_set_level(type, level); + + return level; +} + +/* + * Adjust @level according to the limits of the compression algorithm or + * fallback to default + */ +unsigned int btrfs_compress_set_level(int type, unsigned level) +{ + const struct btrfs_compress_op *ops = btrfs_compress_op[type]; + + if (level == 0) + level = ops->default_level; + else + level = min(level, ops->max_level); return level; } diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index cffd689adb6e..4cb8be9ff88b 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -156,13 +156,6 @@ struct btrfs_compress_op { unsigned long start_byte, size_t srclen, size_t destlen); - /* - * This bounds the level set by the user to be within range of a - * particular compression type. 
It returns the level that will be used - * if the level is out of bounds or the default if 0 is passed in. - */ - unsigned int (*set_level)(unsigned int level); - /* Maximum level supported by the compression algorithm */ unsigned int max_level; unsigned int default_level; @@ -179,6 +172,8 @@ extern const struct btrfs_compress_op btrfs_zstd_compress; const char* btrfs_compress_type2str(enum btrfs_compression_type type); bool btrfs_compress_is_valid_type(const char *str, size_t len); +unsigned int btrfs_compress_set_level(int type, unsigned level); + int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end); #endif diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index adac6cb30d65..acad4174f68d 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -507,11 +507,6 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, return ret; } -static unsigned int lzo_set_level(unsigned int level) -{ - return 0; -} - const struct btrfs_compress_op btrfs_lzo_compress = { .init_workspace_manager = lzo_init_workspace_manager, .cleanup_workspace_manager = lzo_cleanup_workspace_manager, @@ -522,7 +517,6 @@ const struct btrfs_compress_op btrfs_lzo_compress = { .compress_pages = lzo_compress_pages, .decompress_bio = lzo_decompress_bio, .decompress = lzo_decompress, - .set_level = lzo_set_level, .max_level = 1, .default_level = 1, }; diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 03d6c3683bd9..df1aace5df50 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -418,14 +418,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, return ret; } -static unsigned int zlib_set_level(unsigned int level) -{ - if (!level) - return BTRFS_ZLIB_DEFAULT_LEVEL; - - return min_t(unsigned int, level, 9); -} - const struct btrfs_compress_op btrfs_zlib_compress = { .init_workspace_manager = zlib_init_workspace_manager, .cleanup_workspace_manager = zlib_cleanup_workspace_manager, @@ -436,7 +428,6 @@ const struct btrfs_compress_op btrfs_zlib_compress = { .compress_pages = zlib_compress_pages, .decompress_bio = zlib_decompress_bio, .decompress = zlib_decompress, - .set_level = zlib_set_level, .max_level = 9, .default_level = BTRFS_ZLIB_DEFAULT_LEVEL, }; diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index b2b23a6a497d..0af4a5cd4313 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -710,14 +710,6 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in, return ret; } -static unsigned int zstd_set_level(unsigned int level) -{ - if (!level) - return ZSTD_BTRFS_DEFAULT_LEVEL; - - return min_t(unsigned int, level, ZSTD_BTRFS_MAX_LEVEL); -} - const struct btrfs_compress_op btrfs_zstd_compress = { .init_workspace_manager = zstd_init_workspace_manager, .cleanup_workspace_manager = zstd_cleanup_workspace_manager, @@ -728,7 +720,6 @@ const struct btrfs_compress_op btrfs_zstd_compress = { .compress_pages = zstd_compress_pages, .decompress_bio = zstd_decompress_bio, .decompress = zstd_decompress, - .set_level = zstd_set_level, .max_level = ZSTD_BTRFS_MAX_LEVEL, .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, }; From 4f84bd7f99fb67475e17e1cdca412e5d14bd3f45 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 21 Aug 2019 16:38:15 +0300 Subject: [PATCH 086/138] btrfs: Make reada_tree_block_flagged private This function is used only for the readahead machinery. It makes no sense to keep it external to reada.c file. Place it above its sole caller and make it static. No functional changes. 
Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 29 ----------------------------- fs/btrfs/disk-io.h | 2 -- fs/btrfs/reada.c | 29 +++++++++++++++++++++++++++++ 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 589405eeb80f..99dfd889b9f7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1038,35 +1038,6 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr) free_extent_buffer(buf); } -int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, - int mirror_num, struct extent_buffer **eb) -{ - struct extent_buffer *buf = NULL; - int ret; - - buf = btrfs_find_create_tree_block(fs_info, bytenr); - if (IS_ERR(buf)) - return 0; - - set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); - - ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num); - if (ret) { - free_extent_buffer_stale(buf); - return ret; - } - - if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { - free_extent_buffer_stale(buf); - return -EIO; - } else if (extent_buffer_uptodate(buf)) { - *eb = buf; - } else { - free_extent_buffer(buf); - } - return 0; -} - struct extent_buffer *btrfs_find_create_tree_block( struct btrfs_fs_info *fs_info, u64 bytenr) diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index e80f7c45a307..a6958103d87e 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -45,8 +45,6 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, u64 parent_transid, int level, struct btrfs_key *first_key); void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr); -int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, - int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block( struct btrfs_fs_info *fs_info, u64 bytenr); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 0b034c494355..ee6f60547a8d 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -639,6 +639,35 @@ static int reada_pick_zone(struct btrfs_device *dev) return 1; } +static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, + int mirror_num, struct extent_buffer **eb) +{ + struct extent_buffer *buf = NULL; + int ret; + + buf = btrfs_find_create_tree_block(fs_info, bytenr); + if (IS_ERR(buf)) + return 0; + + set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); + + ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num); + if (ret) { + free_extent_buffer_stale(buf); + return ret; + } + + if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { + free_extent_buffer_stale(buf); + return -EIO; + } else if (extent_buffer_uptodate(buf)) { + *eb = buf; + } else { + free_extent_buffer(buf); + } + return 0; +} + static int reada_start_machine_dev(struct btrfs_device *dev) { struct btrfs_fs_info *fs_info = dev->fs_info; From 440630ea7c37d0f785aca4122ac9272d01ee36db Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 21 Aug 2019 17:26:33 +0800 Subject: [PATCH 087/138] btrfs: dev stats item key conversion per cpu type is not needed %found_key is not used, drop it since it hasn't been used since the beginning in 733f4fbbc108 ("Btrfs: read device stats on mount, write modified ones during commit"). 
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index fa6eb9e0ba89..539071bdbf4f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7279,7 +7279,6 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) { struct btrfs_key key; - struct btrfs_key found_key; struct btrfs_root *dev_root = fs_info->dev_root; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct extent_buffer *eb; @@ -7313,7 +7312,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) } slot = path->slots[0]; eb = path->nodes[0]; - btrfs_item_key_to_cpu(eb, &found_key, slot); item_size = btrfs_item_size_nr(eb, slot); ptr = btrfs_item_ptr(eb, slot, From 3b80a984d2283e6f1dc59571e5aadc36eadac939 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Wed, 21 Aug 2019 17:26:32 +0800 Subject: [PATCH 088/138] btrfs: dev stat drop useless goto In the function btrfs_init_dev_stats() goto out is not needed, because the alloc has failed. So just return -ENOMEM. Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 539071bdbf4f..48526923c17c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7289,10 +7289,8 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) int i; path = btrfs_alloc_path(); - if (!path) { - ret = -ENOMEM; - goto out; - } + if (!path) + return -ENOMEM; mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { @@ -7331,7 +7329,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) } mutex_unlock(&fs_devices->device_list_mutex); -out: btrfs_free_path(path); return ret < 0 ? ret : 0; } From f11369897ed4f8609cdee00d3af47c18fe6bda29 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 9 Aug 2019 17:07:39 +0300 Subject: [PATCH 089/138] btrfs: fix error pointer check in __btrfs_map_block() The btrfs_get_chunk_map() never returns NULL, it returns error pointers. Fixes: 89b798ad1b42 ("btrfs: Use btrfs_get_io_geometry appropriately") Reviewed-by: Nikolay Borisov Signed-off-by: Dan Carpenter Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 48526923c17c..e2de7c7b674a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6023,7 +6023,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, return ret; em = btrfs_get_chunk_map(fs_info, logical, *length); - ASSERT(em); + ASSERT(!IS_ERR(em)); map = em->map_lookup; *length = geom.len; From f82d1c7ca8ae1bf89e8d78c5ecb56b6b228c1a75 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 9 Aug 2019 09:24:22 +0800 Subject: [PATCH 090/138] btrfs: tree-checker: Add EXTENT_ITEM and METADATA_ITEM check This patch introduces the ability to check extent items. This check involves: - key->objectid check Basic alignment check. - key->type check Against btrfs_extent_item::type and SKINNY_METADATA feature. - key->offset alignment check for EXTENT_ITEM - key->offset check for METADATA_ITEM - item size check Both against minimal size and stepping check. - btrfs_extent_item check Checks its flags and generation. - btrfs_extent_inline_ref checks Against 4 types inline ref. 
Checks bytenr alignment and tree level. - btrfs_extent_item::refs check Check against total refs found in inline refs. This check would be the most complex single item check due to its nature of inlined items. Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 248 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index d83adda6c090..7eee0bbd8c37 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -910,6 +910,250 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, return 0; } +__printf(3,4) +__cold +static void extent_err(const struct extent_buffer *eb, int slot, + const char *fmt, ...) +{ + struct btrfs_key key; + struct va_format vaf; + va_list args; + u64 bytenr; + u64 len; + + btrfs_item_key_to_cpu(eb, &key, slot); + bytenr = key.objectid; + if (key.type == BTRFS_METADATA_ITEM_KEY) + len = eb->fs_info->nodesize; + else + len = key.offset; + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + btrfs_crit(eb->fs_info, + "corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV", + btrfs_header_level(eb) == 0 ? "leaf" : "node", + eb->start, slot, bytenr, len, &vaf); + va_end(args); +} + +static int check_extent_item(struct extent_buffer *leaf, + struct btrfs_key *key, int slot) +{ + struct btrfs_fs_info *fs_info = leaf->fs_info; + struct btrfs_extent_item *ei; + bool is_tree_block = false; + unsigned long ptr; /* Current pointer inside inline refs */ + unsigned long end; /* Extent item end */ + const u32 item_size = btrfs_item_size_nr(leaf, slot); + u64 flags; + u64 generation; + u64 total_refs; /* Total refs in btrfs_extent_item */ + u64 inline_refs = 0; /* found total inline refs */ + + if (key->type == BTRFS_METADATA_ITEM_KEY && + !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) { + generic_err(leaf, slot, +"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled"); + return -EUCLEAN; + } + /* key->objectid is the bytenr for both key types */ + if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) { + generic_err(leaf, slot, + "invalid key objectid, have %llu expect to be aligned to %u", + key->objectid, fs_info->sectorsize); + return -EUCLEAN; + } + + /* key->offset is tree level for METADATA_ITEM_KEY */ + if (key->type == BTRFS_METADATA_ITEM_KEY && + key->offset >= BTRFS_MAX_LEVEL) { + extent_err(leaf, slot, + "invalid tree level, have %llu expect [0, %u]", + key->offset, BTRFS_MAX_LEVEL - 1); + return -EUCLEAN; + } + + /* + * EXTENT/METADATA_ITEM consists of: + * 1) One btrfs_extent_item + * Records the total refs, type and generation of the extent. + * + * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only) + * Records the first key and level of the tree block. + * + * 2) Zero or more btrfs_extent_inline_ref(s) + * Each inline ref has one btrfs_extent_inline_ref shows: + * 2.1) The ref type, one of the 4 + * TREE_BLOCK_REF Tree block only + * SHARED_BLOCK_REF Tree block only + * EXTENT_DATA_REF Data only + * SHARED_DATA_REF Data only + * 2.2) Ref type specific data + * Either using btrfs_extent_inline_ref::offset, or specific + * data structure. 
+ */ + if (item_size < sizeof(*ei)) { + extent_err(leaf, slot, + "invalid item size, have %u expect [%zu, %u)", + item_size, sizeof(*ei), + BTRFS_LEAF_DATA_SIZE(fs_info)); + return -EUCLEAN; + } + end = item_size + btrfs_item_ptr_offset(leaf, slot); + + /* Checks against extent_item */ + ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); + flags = btrfs_extent_flags(leaf, ei); + total_refs = btrfs_extent_refs(leaf, ei); + generation = btrfs_extent_generation(leaf, ei); + if (generation > btrfs_super_generation(fs_info->super_copy) + 1) { + extent_err(leaf, slot, + "invalid generation, have %llu expect (0, %llu]", + generation, + btrfs_super_generation(fs_info->super_copy) + 1); + return -EUCLEAN; + } + if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA | + BTRFS_EXTENT_FLAG_TREE_BLOCK))) { + extent_err(leaf, slot, + "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx", + flags, BTRFS_EXTENT_FLAG_DATA | + BTRFS_EXTENT_FLAG_TREE_BLOCK); + return -EUCLEAN; + } + is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK); + if (is_tree_block) { + if (key->type == BTRFS_EXTENT_ITEM_KEY && + key->offset != fs_info->nodesize) { + extent_err(leaf, slot, + "invalid extent length, have %llu expect %u", + key->offset, fs_info->nodesize); + return -EUCLEAN; + } + } else { + if (key->type != BTRFS_EXTENT_ITEM_KEY) { + extent_err(leaf, slot, + "invalid key type, have %u expect %u for data backref", + key->type, BTRFS_EXTENT_ITEM_KEY); + return -EUCLEAN; + } + if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) { + extent_err(leaf, slot, + "invalid extent length, have %llu expect aligned to %u", + key->offset, fs_info->sectorsize); + return -EUCLEAN; + } + } + ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1); + + /* Check the special case of btrfs_tree_block_info */ + if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) { + struct btrfs_tree_block_info *info; + + info = (struct btrfs_tree_block_info *)ptr; + if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) { + extent_err(leaf, slot, + "invalid tree block info level, have %u expect [0, %u]", + btrfs_tree_block_level(leaf, info), + BTRFS_MAX_LEVEL - 1); + return -EUCLEAN; + } + ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1); + } + + /* Check inline refs */ + while (ptr < end) { + struct btrfs_extent_inline_ref *iref; + struct btrfs_extent_data_ref *dref; + struct btrfs_shared_data_ref *sref; + u64 dref_offset; + u64 inline_offset; + u8 inline_type; + + if (ptr + sizeof(*iref) > end) { + extent_err(leaf, slot, +"inline ref item overflows extent item, ptr %lu iref size %zu end %lu", + ptr, sizeof(*iref), end); + return -EUCLEAN; + } + iref = (struct btrfs_extent_inline_ref *)ptr; + inline_type = btrfs_extent_inline_ref_type(leaf, iref); + inline_offset = btrfs_extent_inline_ref_offset(leaf, iref); + if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) { + extent_err(leaf, slot, +"inline ref item overflows extent item, ptr %lu iref size %u end %lu", + ptr, inline_type, end); + return -EUCLEAN; + } + + switch (inline_type) { + /* inline_offset is subvolid of the owner, no need to check */ + case BTRFS_TREE_BLOCK_REF_KEY: + inline_refs++; + break; + /* Contains parent bytenr */ + case BTRFS_SHARED_BLOCK_REF_KEY: + if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) { + extent_err(leaf, slot, + "invalid tree parent bytenr, have %llu expect aligned to %u", + inline_offset, fs_info->sectorsize); + return -EUCLEAN; + } + inline_refs++; + break; + /* + * Contains owner subvolid, owner key objectid, 
adjusted offset. + * The only obvious corruption can happen in that offset. + */ + case BTRFS_EXTENT_DATA_REF_KEY: + dref = (struct btrfs_extent_data_ref *)(&iref->offset); + dref_offset = btrfs_extent_data_ref_offset(leaf, dref); + if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) { + extent_err(leaf, slot, + "invalid data ref offset, have %llu expect aligned to %u", + dref_offset, fs_info->sectorsize); + return -EUCLEAN; + } + inline_refs += btrfs_extent_data_ref_count(leaf, dref); + break; + /* Contains parent bytenr and ref count */ + case BTRFS_SHARED_DATA_REF_KEY: + sref = (struct btrfs_shared_data_ref *)(iref + 1); + if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) { + extent_err(leaf, slot, + "invalid data parent bytenr, have %llu expect aligned to %u", + inline_offset, fs_info->sectorsize); + return -EUCLEAN; + } + inline_refs += btrfs_shared_data_ref_count(leaf, sref); + break; + default: + extent_err(leaf, slot, "unknown inline ref type: %u", + inline_type); + return -EUCLEAN; + } + ptr += btrfs_extent_inline_ref_size(inline_type); + } + /* No padding is allowed */ + if (ptr != end) { + extent_err(leaf, slot, + "invalid extent item size, padding bytes found"); + return -EUCLEAN; + } + + /* Finally, check the inline refs against total refs */ + if (inline_refs > total_refs) { + extent_err(leaf, slot, + "invalid extent refs, have %llu expect >= inline %llu", + total_refs, inline_refs); + return -EUCLEAN; + } + return 0; +} + /* * Common point to switch the item-specific validation. */ @@ -948,6 +1192,10 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_ROOT_ITEM_KEY: ret = check_root_item(leaf, key, slot); break; + case BTRFS_EXTENT_ITEM_KEY: + case BTRFS_METADATA_ITEM_KEY: + ret = check_extent_item(leaf, key, slot); + break; } return ret; } From e2406a6f13bdc7df43bf7e4e66cb06348daef618 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 9 Aug 2019 09:24:23 +0800 Subject: [PATCH 091/138] btrfs: tree-checker: Add simple keyed refs check For TREE_BLOCK_REF, SHARED_DATA_REF and SHARED_BLOCK_REF we need to check: | TREE_BLOCK_REF | SHARED_BLOCK_REF | SHARED_BLOCK_REF --------------+----------------+-----------------+------------------ key->objectid | Alignment | Alignment | Alignment key->offset | Any value | Alignment | Alignment item_size | 0 | 0 | sizeof(le32) (*) *: sizeof(struct btrfs_shared_data_ref) So introduce a check to check all these 3 key types together. 
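The three columns above correspond to TREE_BLOCK_REF, SHARED_BLOCK_REF and SHARED_DATA_REF; only the last carries a payload (a __le32 ref count), hence the sizeof(le32) item size. A condensed sketch of the combined check (the full version is in the diff below):

	u32 expect_item_size = 0;

	if (key->type == BTRFS_SHARED_DATA_REF_KEY)
		expect_item_size = sizeof(struct btrfs_shared_data_ref);

	/* The item size must match the ref type exactly. */
	if (btrfs_item_size_nr(leaf, slot) != expect_item_size)
		return -EUCLEAN;
	/* The bytenr in the key objectid must be sector aligned. */
	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))
		return -EUCLEAN;
	/* For the shared variants the offset is a parent bytenr. */
	if (key->type != BTRFS_TREE_BLOCK_REF_KEY &&
	    !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize))
		return -EUCLEAN;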
Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 7eee0bbd8c37..0a56616bef9a 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -923,7 +923,9 @@ static void extent_err(const struct extent_buffer *eb, int slot, btrfs_item_key_to_cpu(eb, &key, slot); bytenr = key.objectid; - if (key.type == BTRFS_METADATA_ITEM_KEY) + if (key.type == BTRFS_METADATA_ITEM_KEY || + key.type == BTRFS_TREE_BLOCK_REF_KEY || + key.type == BTRFS_SHARED_BLOCK_REF_KEY) len = eb->fs_info->nodesize; else len = key.offset; @@ -1154,6 +1156,37 @@ static int check_extent_item(struct extent_buffer *leaf, return 0; } +static int check_simple_keyed_refs(struct extent_buffer *leaf, + struct btrfs_key *key, int slot) +{ + u32 expect_item_size = 0; + + if (key->type == BTRFS_SHARED_DATA_REF_KEY) + expect_item_size = sizeof(struct btrfs_shared_data_ref); + + if (btrfs_item_size_nr(leaf, slot) != expect_item_size) { + generic_err(leaf, slot, + "invalid item size, have %u expect %u for key type %u", + btrfs_item_size_nr(leaf, slot), + expect_item_size, key->type); + return -EUCLEAN; + } + if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { + generic_err(leaf, slot, +"invalid key objectid for shared block ref, have %llu expect aligned to %u", + key->objectid, leaf->fs_info->sectorsize); + return -EUCLEAN; + } + if (key->type != BTRFS_TREE_BLOCK_REF_KEY && + !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) { + extent_err(leaf, slot, + "invalid tree parent bytenr, have %llu expect aligned to %u", + key->offset, leaf->fs_info->sectorsize); + return -EUCLEAN; + } + return 0; +} + /* * Common point to switch the item-specific validation. */ @@ -1196,6 +1229,11 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_METADATA_ITEM_KEY: ret = check_extent_item(leaf, key, slot); break; + case BTRFS_TREE_BLOCK_REF_KEY: + case BTRFS_SHARED_DATA_REF_KEY: + case BTRFS_SHARED_BLOCK_REF_KEY: + ret = check_simple_keyed_refs(leaf, key, slot); + break; } return ret; } From 0785a9aacf9de9cd6a89a628b3b5d6b7e9ce5316 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 9 Aug 2019 09:24:24 +0800 Subject: [PATCH 092/138] btrfs: tree-checker: Add EXTENT_DATA_REF check EXTENT_DATA_REF is a little like DIR_ITEM which contains hash in its key->offset. This patch will check the following contents: - Key->objectid Basic alignment check. - Hash Hash of each extent_data_ref item must match key->offset. - Offset Basic alignment check. 
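A minimal sketch of the hash check described above, using hash_extent_data_ref() which this patch exports from extent-tree.c (the full loop is in the diff below); every btrfs_extent_data_ref packed into the item must hash to the key offset:

	u64 root_objectid = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u64 hash = hash_extent_data_ref(root_objectid, owner, offset);

	if (hash != key->offset)
		return -EUCLEAN;	/* stored hash does not match the ref */
	if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize))
		return -EUCLEAN;	/* file offset must be sector aligned */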
Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-tree.c | 2 +- fs/btrfs/tree-checker.c | 48 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b161224b5a0b..20793742c9d3 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2447,6 +2447,7 @@ enum btrfs_inline_ref_type { int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, struct btrfs_extent_inline_ref *iref, enum btrfs_inline_ref_type is_data); +u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset); u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd210550a349..5e8c6a0bac4b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -438,7 +438,7 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, return BTRFS_REF_TYPE_INVALID; } -static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) +u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset) { u32 high_crc = ~(u32)0; u32 low_crc = ~(u32)0; diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 0a56616bef9a..9645389a1187 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1187,6 +1187,51 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf, return 0; } +static int check_extent_data_ref(struct extent_buffer *leaf, + struct btrfs_key *key, int slot) +{ + struct btrfs_extent_data_ref *dref; + unsigned long ptr = btrfs_item_ptr_offset(leaf, slot); + const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot); + + if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) { + generic_err(leaf, slot, + "invalid item size, have %u expect aligned to %zu for key type %u", + btrfs_item_size_nr(leaf, slot), + sizeof(*dref), key->type); + } + if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) { + generic_err(leaf, slot, +"invalid key objectid for shared block ref, have %llu expect aligned to %u", + key->objectid, leaf->fs_info->sectorsize); + return -EUCLEAN; + } + for (; ptr < end; ptr += sizeof(*dref)) { + u64 root_objectid; + u64 owner; + u64 offset; + u64 hash; + + dref = (struct btrfs_extent_data_ref *)ptr; + root_objectid = btrfs_extent_data_ref_root(leaf, dref); + owner = btrfs_extent_data_ref_objectid(leaf, dref); + offset = btrfs_extent_data_ref_offset(leaf, dref); + hash = hash_extent_data_ref(root_objectid, owner, offset); + if (hash != key->offset) { + extent_err(leaf, slot, + "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx", + hash, key->offset); + return -EUCLEAN; + } + if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) { + extent_err(leaf, slot, + "invalid extent data backref offset, have %llu expect aligned to %u", + offset, leaf->fs_info->sectorsize); + } + } + return 0; +} + /* * Common point to switch the item-specific validation. 
*/ @@ -1234,6 +1279,9 @@ static int check_leaf_item(struct extent_buffer *leaf, case BTRFS_SHARED_BLOCK_REF_KEY: ret = check_simple_keyed_refs(leaf, key, slot); break; + case BTRFS_EXTENT_DATA_REF_KEY: + ret = check_extent_data_ref(leaf, key, slot); + break; } return ret; } From 2bd36e7b4fd60d4ff5f9ba6a0ad84557ae4803c4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:14:33 -0400 Subject: [PATCH 093/138] btrfs: rename the btrfs_calc_*_metadata_size helpers btrfs_calc_trunc_metadata_size differs from trans_metadata_size in that it doesn't take into account any splitting at the levels, because truncate will never split nodes. However truncate _and_ changing will never split nodes, so rename btrfs_calc_trunc_metadata_size to btrfs_calc_metadata_size. Also btrfs_calc_trans_metadata_size is purely for inserting items, so rename this to btrfs_calc_insert_metadata_size. Making these clearer will help when I start using them differently in upcoming patches. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 4 ++-- fs/btrfs/ctree.h | 14 +++++++++----- fs/btrfs/delalloc-space.c | 8 ++++---- fs/btrfs/delayed-inode.c | 4 ++-- fs/btrfs/delayed-ref.c | 8 ++++---- fs/btrfs/file.c | 4 ++-- fs/btrfs/free-space-cache.c | 4 ++-- fs/btrfs/inode-map.c | 2 +- fs/btrfs/inode.c | 6 +++--- fs/btrfs/props.c | 2 +- fs/btrfs/root-tree.c | 2 +- fs/btrfs/space-info.c | 2 +- fs/btrfs/transaction.c | 4 ++-- 13 files changed, 34 insertions(+), 30 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 55d6d1c36b62..c912ee26e85d 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -3015,8 +3015,8 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) num_devs = get_profile_num_devs(fs_info, type); /* num_devs device items to update and 1 chunk item to add or remove */ - thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) + - btrfs_calc_trans_metadata_size(fs_info, 1); + thresh = btrfs_calc_metadata_size(fs_info, num_devs) + + btrfs_calc_insert_metadata_size(fs_info, 1); if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 20793742c9d3..d27b39858339 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2451,17 +2451,21 @@ u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset); u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes); -static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, - unsigned num_items) +/* + * Use this if we would be adding new items, as we could split nodes as we cow + * down the tree. + */ +static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info, + unsigned num_items) { return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; } /* - * Doing a truncate won't result in new nodes or leaves, just what we need for - * COW. + * Doing a truncate or a modification won't result in new nodes or leaves, just + * what we need for COW. 
*/ -static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, +static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info, unsigned num_items) { return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index d2dfc201b2e1..62aa18b38a08 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -256,12 +256,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, lockdep_assert_held(&inode->lock); outstanding_extents = inode->outstanding_extents; if (outstanding_extents) - reserve_size = btrfs_calc_trans_metadata_size(fs_info, + reserve_size = btrfs_calc_insert_metadata_size(fs_info, outstanding_extents + 1); csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); - reserve_size += btrfs_calc_trans_metadata_size(fs_info, - csum_leaves); + reserve_size += btrfs_calc_insert_metadata_size(fs_info, + csum_leaves); /* * For qgroup rsv, the calculation is very simple: * account one nodesize for each outstanding extent @@ -284,7 +284,7 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info, u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes); /* We add one for the inode update at finish ordered time */ - *meta_reserve = btrfs_calc_trans_metadata_size(fs_info, + *meta_reserve = btrfs_calc_insert_metadata_size(fs_info, nr_extents + csum_leaves + 1); *qgroup_reserve = nr_extents * fs_info->nodesize; } diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 6858a05606dd..de87ea7ce84d 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -558,7 +558,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, src_rsv = trans->block_rsv; dst_rsv = &fs_info->delayed_block_rsv; - num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1); /* * Here we migrate space rsv from transaction rsv, since have already @@ -612,7 +612,7 @@ static int btrfs_delayed_inode_reserve_metadata( src_rsv = trans->block_rsv; dst_rsv = &fs_info->delayed_block_rsv; - num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1); /* * btrfs_dirty_inode will update the inode under btrfs_join_transaction diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 9a91d1eb0af4..951a60c740e7 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -79,7 +79,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans) void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr) { struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; - u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr); + u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr); u64 released = 0; released = __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, @@ -105,8 +105,8 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) if (!trans->delayed_ref_updates) return; - num_bytes = btrfs_calc_trans_metadata_size(fs_info, - trans->delayed_ref_updates); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, + trans->delayed_ref_updates); spin_lock(&delayed_rsv->lock); delayed_rsv->size += num_bytes; delayed_rsv->full = 0; @@ -174,7 +174,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, enum btrfs_reserve_flush_enum flush) { struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; - u64 limit = 
btrfs_calc_trans_metadata_size(fs_info, 1); + u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1); u64 num_bytes = 0; int ret = -ENOSPC; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b31991f0f440..1cb694c96500 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2511,7 +2511,7 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, struct btrfs_trans_handle **trans_out) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1); + u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1); u64 ino_size = round_up(inode->i_size, fs_info->sectorsize); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans = NULL; @@ -2530,7 +2530,7 @@ int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, ret = -ENOMEM; goto out; } - rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1); + rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1); rsv->failfast = 1; /* diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index faaf57a7c289..265dc75f7a7a 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -211,8 +211,8 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, int ret; /* 1 for slack space, 1 for updating the inode */ - needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) + - btrfs_calc_trans_metadata_size(fs_info, 1); + needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) + + btrfs_calc_metadata_size(fs_info, 1); spin_lock(&rsv->lock); if (rsv->reserved < needed_bytes) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 86031cdfc356..63cad7865d75 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -436,7 +436,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root, * 1 item for free space object * 3 items for pre-allocation */ - trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10); + trans->bytes_reserved = btrfs_calc_insert_metadata_size(fs_info, 10); ret = btrfs_block_rsv_add(root, trans->block_rsv, trans->bytes_reserved, BTRFS_RESERVE_NO_FLUSH); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c4116bc58827..31eacbf51d4e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5337,7 +5337,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root, struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_trans_handle *trans; - u64 delayed_refs_extra = btrfs_calc_trans_metadata_size(fs_info, 1); + u64 delayed_refs_extra = btrfs_calc_insert_metadata_size(fs_info, 1); int ret; /* @@ -5426,7 +5426,7 @@ void btrfs_evict_inode(struct inode *inode) rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); if (!rsv) goto no_delete; - rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1); + rsv->size = btrfs_calc_metadata_size(fs_info, 1); rsv->failfast = 1; btrfs_i_size_write(BTRFS_I(inode), 0); @@ -9040,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback) int ret; struct btrfs_trans_handle *trans; u64 mask = fs_info->sectorsize - 1; - u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1); + u64 min_size = btrfs_calc_metadata_size(fs_info, 1); if (!skip_writeback) { ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index e0469816c678..1e664e0b59b8 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -362,7 +362,7 @@ static int inherit_props(struct btrfs_trans_handle 
*trans, * reservations if we do add more properties in the future. */ if (need_reserve) { - num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1); ret = btrfs_block_rsv_add(root, trans->block_rsv, num_bytes, BTRFS_RESERVE_NO_FLUSH); if (ret) diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 47733fb55df7..3b17b647d002 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -533,7 +533,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, return ret; } - num_bytes = btrfs_calc_trans_metadata_size(fs_info, items); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, items); rsv->space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); ret = btrfs_block_rsv_add(root, rsv, num_bytes, diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 5f8f65599de1..13a4326c8821 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -412,7 +412,7 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info, u64 bytes; u64 nr; - bytes = btrfs_calc_trans_metadata_size(fs_info, 1); + bytes = btrfs_calc_insert_metadata_size(fs_info, 1); nr = div64_u64(to_reclaim, bytes); if (!nr) nr = 1; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2e3f6778bfa3..f21416d68c2c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -485,7 +485,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, * worth of delayed refs updates in this trans handle, and * refill that amount for whatever is missing in the reserve. */ - num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); if (delayed_refs_rsv->full == 0) { delayed_refs_bytes = num_bytes; num_bytes <<= 1; @@ -636,7 +636,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( if (IS_ERR(trans)) return trans; - num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items); + num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv, num_bytes, min_factor); if (ret) { From bcacf5f3f92b886431b3a739038cc74b5e7e9403 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:14:34 -0400 Subject: [PATCH 094/138] btrfs: only reserve metadata_size for inodes Historically we reserved worst case for every btree operation, and generally speaking we want to do that in cases where it could be the worst case. However for updating inodes we know the inode items are already in the tree, so it will only be an update operation and never an insert operation. This allows us to always reserve only the metadata_size amount for inode updates rather than the insert_metadata_size amount. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/delalloc-space.c | 21 +++++++++++++++++---- fs/btrfs/delayed-inode.c | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index 62aa18b38a08..391dcb217098 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -255,9 +255,16 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, lockdep_assert_held(&inode->lock); outstanding_extents = inode->outstanding_extents; - if (outstanding_extents) + + /* + * Insert size for the number of outstanding extents, 1 normal size for + * updating the inode. 
+ */ + if (outstanding_extents) { reserve_size = btrfs_calc_insert_metadata_size(fs_info, - outstanding_extents + 1); + outstanding_extents); + reserve_size += btrfs_calc_metadata_size(fs_info, 1); + } csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); reserve_size += btrfs_calc_insert_metadata_size(fs_info, @@ -282,10 +289,16 @@ static void calc_inode_reservations(struct btrfs_fs_info *fs_info, { u64 nr_extents = count_max_extents(num_bytes); u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes); + u64 inode_update = btrfs_calc_metadata_size(fs_info, 1); - /* We add one for the inode update at finish ordered time */ *meta_reserve = btrfs_calc_insert_metadata_size(fs_info, - nr_extents + csum_leaves + 1); + nr_extents + csum_leaves); + + /* + * finish_ordered_io has to update the inode, so add the space required + * for an inode update. + */ + *meta_reserve += inode_update; *qgroup_reserve = nr_extents * fs_info->nodesize; } diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index de87ea7ce84d..9318cf761a07 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -612,7 +612,7 @@ static int btrfs_delayed_inode_reserve_metadata( src_rsv = trans->block_rsv; dst_rsv = &fs_info->delayed_block_rsv; - num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1); + num_bytes = btrfs_calc_metadata_size(fs_info, 1); /* * btrfs_dirty_inode will update the inode under btrfs_join_transaction From 3e024846d241b992ded27fdfdb78ea819abaa0cb Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 21 Aug 2019 10:42:03 +0300 Subject: [PATCH 095/138] btrfs: refactor variable scope in run_delalloc_nocow Of the 22 (!!!) local variables declared in this function only 9 have function-wide context. Of the remaining 13, 12 are needed in the main while loop of the function and 1 is needed in a tiny if branch, only in case we have prealloc extent. This commit reduces the lifespan of every variable to its bare minimum. It also renames the 'nolock' boolean to freespace_inode to clearly indicate its purpose. 
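As an editorial aside, the scoping change can be pictured with a small self-contained C sketch (hypothetical names, not the btrfs code): state that only matters to a single loop iteration is declared inside the loop body, while long-lived state such as the renamed freespace_inode flag stays at function scope.

  #include <stdbool.h>
  #include <stdio.h>

  int main(void)
  {
          /* function-wide state, analogous to 'freespace_inode' */
          const bool freespace_inode = false;
          long total = 0;

          for (int i = 0; i < 4; i++) {
                  /* per-iteration state, analogous to the extent locals
                   * that were moved into the main while loop */
                  long num_bytes = i * 4096;
                  bool nocow = (i % 2 == 0);

                  if (!freespace_inode && nocow)
                          total += num_bytes;
          }
          printf("%ld\n", total);
          return 0;
  }

Keeping each variable's declaration next to its only use makes both its lifetime and its purpose obvious at a glance.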
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 61 ++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 31eacbf51d4e..fdc20d81af82 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1301,30 +1301,18 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info, */ static noinline int run_delalloc_nocow(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, int force, - unsigned long *nr_written) + const u64 start, const u64 end, + int *page_started, int force, + unsigned long *nr_written) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; - struct extent_buffer *leaf; struct btrfs_path *path; - struct btrfs_file_extent_item *fi; - struct btrfs_key found_key; - struct extent_map *em; - u64 cow_start; - u64 cur_offset; - u64 extent_end; - u64 extent_offset; - u64 disk_bytenr; - u64 num_bytes; - u64 disk_num_bytes; - u64 ram_bytes; - int extent_type; + u64 cow_start = (u64)-1; + u64 cur_offset = start; int ret; - int type; - int nocow; - int check_prev = 1; - bool nolock; + bool check_prev = true; + const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); u64 ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); @@ -1339,11 +1327,20 @@ static noinline int run_delalloc_nocow(struct inode *inode, return -ENOMEM; } - nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); - - cow_start = (u64)-1; - cur_offset = start; while (1) { + struct btrfs_key found_key; + struct btrfs_file_extent_item *fi; + struct extent_buffer *leaf; + u64 extent_end; + u64 extent_offset; + u64 disk_bytenr = 0; + u64 num_bytes = 0; + u64 disk_num_bytes; + int type; + u64 ram_bytes; + int extent_type; + bool nocow = false; + ret = btrfs_lookup_file_extent(NULL, root, path, ino, cur_offset, 0); if (ret < 0) @@ -1356,7 +1353,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, found_key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } - check_prev = 0; + check_prev = false; next_slot: leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(leaf)) { @@ -1371,9 +1368,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, leaf = path->nodes[0]; } - nocow = 0; - disk_bytenr = 0; - num_bytes = 0; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid > ino) @@ -1420,7 +1414,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, * Do the same check as in btrfs_cross_ref_exist but * without the unnecessary search. */ - if (!nolock && + if (!freespace_inode && btrfs_file_extent_generation(leaf, fi) <= btrfs_root_last_snapshot(&root->root_item)) goto out_check; @@ -1442,7 +1436,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, goto error; } - WARN_ON_ONCE(nolock); + WARN_ON_ONCE(freespace_inode); goto out_check; } disk_bytenr += extent_offset; @@ -1452,7 +1446,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, * if there are pending snapshots for this root, * we fall into common COW way. */ - if (!nolock && atomic_read(&root->snapshot_force_cow)) + if (!freespace_inode && atomic_read(&root->snapshot_force_cow)) goto out_check; /* * force cow if csum exists in the range. 
@@ -1471,12 +1465,12 @@ static noinline int run_delalloc_nocow(struct inode *inode, cur_offset = cow_start; goto error; } - WARN_ON_ONCE(nolock); + WARN_ON_ONCE(freespace_inode); goto out_check; } if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) goto out_check; - nocow = 1; + nocow = true; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + btrfs_file_extent_ram_bytes(leaf, fi); @@ -1518,6 +1512,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 orig_start = found_key.offset - extent_offset; + struct extent_map *em; em = create_io_em(inode, cur_offset, num_bytes, orig_start, @@ -1543,7 +1538,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, - num_bytes, num_bytes, type); + num_bytes, num_bytes,type); if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); BUG_ON(ret); /* -ENOMEM */ From a6bd9cd1559b8e78d1d03fe64445ae6dc419ea16 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 21 Aug 2019 10:42:57 +0300 Subject: [PATCH 096/138] btrfs: improve comments around nocow path run_delalloc_nocow contains numerous, somewhat subtle, checks when figuring out whether a particular extent should be CoW'ed or not. This patch explicitly states the assumptions those checks verify. As a result also document 2 of the more subtle checks in check_committed_ref as well. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 3 +++ fs/btrfs/inode.c | 51 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5e8c6a0bac4b..af7631472073 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2357,16 +2357,19 @@ static noinline int check_committed_ref(struct btrfs_root *root, item_size = btrfs_item_size_nr(leaf, path->slots[0]); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); + /* If extent item has more than 1 inline ref then it's shared */ if (item_size != sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) goto out; + /* If extent created before last snapshot => it's definitely shared */ if (btrfs_extent_generation(leaf, ei) <= btrfs_root_last_snapshot(&root->root_item)) goto out; iref = (struct btrfs_extent_inline_ref *)(ei + 1); + /* If this extent has SHARED_DATA_REF then it's shared */ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA); if (type != BTRFS_EXTENT_DATA_REF_KEY) goto out; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fdc20d81af82..ae44ec2f399e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1345,6 +1345,12 @@ static noinline int run_delalloc_nocow(struct inode *inode, cur_offset, 0); if (ret < 0) goto error; + + /* + * If there is no extent for our range when doing the initial + * search, then go back to the previous slot as it will be the + * one containing the search offset + */ if (ret > 0 && path->slots[0] > 0 && check_prev) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, @@ -1355,6 +1361,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } check_prev = false; next_slot: + /* Go to next leaf if we have exhausted the current one */ leaf = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); @@ -1370,23 +1377,38 @@ static noinline int run_delalloc_nocow(struct inode *inode, 
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + /* Didn't find anything for our INO */ if (found_key.objectid > ino) break; + /* + * Keep searching until we find an EXTENT_ITEM or there are no + * more extents for this inode + */ if (WARN_ON_ONCE(found_key.objectid < ino) || found_key.type < BTRFS_EXTENT_DATA_KEY) { path->slots[0]++; goto next_slot; } + + /* Found key is not EXTENT_DATA_KEY or starts after req range */ if (found_key.type > BTRFS_EXTENT_DATA_KEY || found_key.offset > end) break; + /* + * If the found extent starts after requested offset, then + * adjust extent_end to be right before this extent begins + */ if (found_key.offset > cur_offset) { extent_end = found_key.offset; extent_type = 0; goto out_check; } + /* + * Found extent which begins before our range and potentially + * intersect it + */ fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); @@ -1400,19 +1422,28 @@ static noinline int run_delalloc_nocow(struct inode *inode, btrfs_file_extent_num_bytes(leaf, fi); disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); + /* + * If extent we got ends before our range starts, skip + * to next extent + */ if (extent_end <= start) { path->slots[0]++; goto next_slot; } + /* Skip holes */ if (disk_bytenr == 0) goto out_check; + /* Skip compressed/encrypted/encoded extents */ if (btrfs_file_extent_compression(leaf, fi) || btrfs_file_extent_encryption(leaf, fi) || btrfs_file_extent_other_encoding(leaf, fi)) goto out_check; /* - * Do the same check as in btrfs_cross_ref_exist but - * without the unnecessary search. + * If extent is created before the last volume's snapshot + * this implies the extent is shared, hence we can't do + * nocow. This is the same check as in + * btrfs_cross_ref_exist but without calling + * btrfs_search_slot. */ if (!freespace_inode && btrfs_file_extent_generation(leaf, fi) <= @@ -1420,6 +1451,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, goto out_check; if (extent_type == BTRFS_FILE_EXTENT_REG && !force) goto out_check; + /* If extent is RO, we must COW it */ if (btrfs_extent_readonly(fs_info, disk_bytenr)) goto out_check; ret = btrfs_cross_ref_exist(root, ino, @@ -1443,8 +1475,8 @@ static noinline int run_delalloc_nocow(struct inode *inode, disk_bytenr += cur_offset - found_key.offset; num_bytes = min(end + 1, extent_end) - cur_offset; /* - * if there are pending snapshots for this root, - * we fall into common COW way. + * If there are pending snapshots for this root, we + * fall into common COW way */ if (!freespace_inode && atomic_read(&root->snapshot_force_cow)) goto out_check; @@ -1480,12 +1512,17 @@ static noinline int run_delalloc_nocow(struct inode *inode, BUG(); } out_check: + /* Skip extents outside of our requested range */ if (extent_end <= start) { path->slots[0]++; if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); goto next_slot; } + /* + * If nocow is false then record the beginning of the range + * that needs to be COWed + */ if (!nocow) { if (cow_start == (u64)-1) cow_start = cur_offset; @@ -1497,6 +1534,12 @@ static noinline int run_delalloc_nocow(struct inode *inode, } btrfs_release_path(path); + + /* + * COW range from cow_start to found_key.offset - 1. 
As the key + * will contain the beginning of the first extent that can be + * NOCOW, following one which needs to be COW'ed + */ if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, found_key.offset - 1, From bb55f6260b4756ba0ce46fc373f6f2e1893f752d Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 5 Aug 2019 17:47:05 +0300 Subject: [PATCH 097/138] btrfs: simplify extent type checks in run_delalloc_nocow There is no point in checking the type of the extent again just to set the 'type' variable, when this check has already been performed before. Instead, extend the original if branch with an 'else' clause. This allows removing one local variable and makes it obvious how the code flow differs for prealloc/regular extents. Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ae44ec2f399e..383ad5256f70 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1336,7 +1336,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, u64 disk_bytenr = 0; u64 num_bytes = 0; u64 disk_num_bytes; - int type; u64 ram_bytes; int extent_type; bool nocow = false; @@ -1572,16 +1571,17 @@ static noinline int run_delalloc_nocow(struct inode *inode, goto error; } free_extent_map(em); - } - - if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { - type = BTRFS_ORDERED_PREALLOC; + ret = btrfs_add_ordered_extent(inode, cur_offset, + disk_bytenr, num_bytes, + num_bytes, + BTRFS_ORDERED_PREALLOC); } else { - type = BTRFS_ORDERED_NOCOW; + ret = btrfs_add_ordered_extent(inode, cur_offset, + disk_bytenr, num_bytes, + num_bytes, + BTRFS_ORDERED_NOCOW); } - ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, - num_bytes, num_bytes,type); if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); BUG_ON(ret); /* -ENOMEM */ From 922f0518249d20d00ba11470331a2c89b6749c91 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 5 Aug 2019 17:47:06 +0300 Subject: [PATCH 098/138] btrfs: streamline code in run_delalloc_nocow in case of inline extents The extent range check right after the "out_check" label is redundant, because the only way it can trigger is if we have an inline extent. In this case it makes more sense to actually move it into the branch explicitly dealing with inline extents. What's more, the nested 'if (nocow)' check can never be true, because for inline extents we always do COW, so just remove that check.
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 383ad5256f70..aece5dd0e7a8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1507,17 +1507,15 @@ static noinline int run_delalloc_nocow(struct inode *inode, btrfs_file_extent_ram_bytes(leaf, fi); extent_end = ALIGN(extent_end, fs_info->sectorsize); + /* Skip extents outside of our requested range */ + if (extent_end <= start) { + path->slots[0]++; + goto next_slot; + } } else { BUG(); } out_check: - /* Skip extents outside of our requested range */ - if (extent_end <= start) { - path->slots[0]++; - if (nocow) - btrfs_dec_nocow_writers(fs_info, disk_bytenr); - goto next_slot; - } /* * If nocow is false then record the beginning of the range * that needs to be COWed From e8e210075a901aac577fd33619509dfb21a07071 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 22 Aug 2019 17:25:23 +0300 Subject: [PATCH 099/138] btrfs: comment and minor simplifications in run_delalloc_nocow Add a comment explaining why we keep the BUG(), and use the already read and cached value of the extent's ram bytes stored in 'ram_bytes'. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index aece5dd0e7a8..90c6a4813a19 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1503,16 +1503,15 @@ static noinline int run_delalloc_nocow(struct inode *inode, goto out_check; nocow = true; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { - extent_end = found_key.offset + - btrfs_file_extent_ram_bytes(leaf, fi); - extent_end = ALIGN(extent_end, - fs_info->sectorsize); + extent_end = found_key.offset + ram_bytes; + extent_end = ALIGN(extent_end, fs_info->sectorsize); /* Skip extents outside of our requested range */ if (extent_end <= start) { path->slots[0]++; goto next_slot; } } else { + /* If this triggers then we have a memory corruption */ BUG(); } out_check: From 762bf09893b42d1ac8f7f4c02b86e3143b99e61f Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Thu, 22 Aug 2019 17:24:20 +0300 Subject: [PATCH 100/138] btrfs: improve error handling in run_delalloc_nocow Correctly handle the failure cases when adding an ordered extent for REGULAR or PREALLOC extents. Remove the BUG_ON.
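To make the shape of that change clearer, here is a minimal self-contained C sketch of the error handling pattern being adopted (the names are hypothetical stand-ins, not the btrfs functions): the per-iteration reference is tracked in a function-scope flag so the error label can release it, and a failing call propagates its error instead of triggering an assertion.

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for taking/dropping a nocow writer reference
   * and for adding an ordered extent; only the control flow matters. */
  static bool take_writer_ref(void) { return true; }
  static void drop_writer_ref(void) { puts("writer ref dropped"); }
  static int add_ordered_extent(int i) { return (i == 2) ? -ENOMEM : 0; }

  static int process_ranges(void)
  {
          bool nocow = false; /* function scope, visible at the error label */
          int ret = 0;

          for (int i = 0; i < 4; i++) {
                  nocow = take_writer_ref();

                  ret = add_ordered_extent(i);
                  if (ret)
                          goto error; /* propagate instead of asserting on ret */

                  drop_writer_ref();
                  nocow = false;
          }
          return 0;
  error:
          if (nocow) /* release whatever the failing iteration still held */
                  drop_writer_ref();
          return ret;
  }

  int main(void)
  {
          return process_ranges() ? 1 : 0;
  }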
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 90c6a4813a19..b52282df8c4d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1314,6 +1314,8 @@ static noinline int run_delalloc_nocow(struct inode *inode, bool check_prev = true; const bool freespace_inode = btrfs_is_free_space_inode(BTRFS_I(inode)); u64 ino = btrfs_ino(BTRFS_I(inode)); + bool nocow = false; + u64 disk_bytenr = 0; path = btrfs_alloc_path(); if (!path) { @@ -1333,12 +1335,12 @@ static noinline int run_delalloc_nocow(struct inode *inode, struct extent_buffer *leaf; u64 extent_end; u64 extent_offset; - u64 disk_bytenr = 0; u64 num_bytes = 0; u64 disk_num_bytes; u64 ram_bytes; int extent_type; - bool nocow = false; + + nocow = false; ret = btrfs_lookup_file_extent(NULL, root, path, ino, cur_offset, 0); @@ -1572,16 +1574,25 @@ static noinline int run_delalloc_nocow(struct inode *inode, disk_bytenr, num_bytes, num_bytes, BTRFS_ORDERED_PREALLOC); + if (ret) { + btrfs_drop_extent_cache(BTRFS_I(inode), + cur_offset, + cur_offset + num_bytes - 1, + 0); + goto error; + } } else { ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, num_bytes, num_bytes, BTRFS_ORDERED_NOCOW); + if (ret) + goto error; } if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); - BUG_ON(ret); /* -ENOMEM */ + nocow = false; if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) @@ -1626,6 +1637,9 @@ static noinline int run_delalloc_nocow(struct inode *inode, } error: + if (nocow) + btrfs_dec_nocow_writers(fs_info, disk_bytenr); + if (ret && cur_offset < end) extent_clear_unlock_delalloc(inode, cur_offset, end, locked_page, EXTENT_LOCKED | From ebc87351e5fc43022f687c01daca7e013837ae11 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Mon, 26 Aug 2019 17:34:24 +0300 Subject: [PATCH 101/138] btrfs: Deprecate BTRFS_SUBVOL_CREATE_ASYNC flag Support for asynchronous snapshot creation was originally added in 72fd032e9424 ("Btrfs: add SNAP_CREATE_ASYNC ioctl") to cater for ceph's backend needs. However, since Ceph has deprecated support for btrfs there is no longer need for that support in btrfs. Additionally, this was never supported by btrfs-progs, the official userspace tools. 
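For context, a minimal userspace sketch of issuing SNAP_CREATE_V2 without the deprecated async flag might look as follows (the paths and snapshot name are placeholders and error handling is trimmed):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/btrfs.h>

  int main(void)
  {
          struct btrfs_ioctl_vol_args_v2 args = { 0 };
          /* source subvolume and the directory that will hold the snapshot */
          int src = open("/mnt/btrfs/subvol", O_RDONLY);
          int dst = open("/mnt/btrfs", O_RDONLY | O_DIRECTORY);

          if (src < 0 || dst < 0)
                  return 1;

          args.fd = src;
          args.flags = BTRFS_SUBVOL_RDONLY; /* still supported; only CREATE_ASYNC is deprecated */
          strncpy(args.name, "snap-of-subvol", BTRFS_SUBVOL_NAME_MAX);

          if (ioctl(dst, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0)
                  perror("BTRFS_IOC_SNAP_CREATE_V2");

          close(src);
          close(dst);
          return 0;
  }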
Reviewed-by: Qu Wenruo Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4eabd419aaca..5942615be398 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1841,8 +1841,15 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, goto free_args; } - if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) + if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) { + struct inode *inode = file_inode(file); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + + btrfs_warn(fs_info, +"SNAP_CREATE_V2 ioctl with CREATE_ASYNC is deprecated and will be removed in kernel 5.7"); + ptr = &transid; + } if (vol_args->flags & BTRFS_SUBVOL_RDONLY) readonly = true; if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { @@ -4191,6 +4198,9 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root, u64 transid; int ret; + btrfs_warn(root->fs_info, + "START_SYNC ioctl is deprecated and will be removed in kernel 5.7"); + trans = btrfs_attach_transaction_barrier(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT) @@ -4218,6 +4228,9 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info, { u64 transid; + btrfs_warn(fs_info, + "WAIT_SYNC ioctl is deprecated and will be removed in kernel 5.7"); + if (argp) { if (copy_from_user(&transid, argp, sizeof(transid))) return -EFAULT; From 62fdaa52a3d00a875da771719b6dc537ca79fce1 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 22 Aug 2019 10:14:15 +0800 Subject: [PATCH 102/138] btrfs: Detect unbalanced tree with empty leaf before crashing btree operations [BUG] With crafted image, btrfs will panic at btree operations: kernel BUG at fs/btrfs/ctree.c:3894! invalid opcode: 0000 [#1] SMP PTI CPU: 0 PID: 1138 Comm: btrfs-transacti Not tainted 5.0.0-rc8+ #9 RIP: 0010:__push_leaf_left+0x6b6/0x6e0 RSP: 0018:ffffc0bd4128b990 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffffa0a4ab8f0e38 RCX: 0000000000000000 RDX: ffffa0a280000000 RSI: 0000000000000000 RDI: ffffa0a4b3814000 RBP: ffffc0bd4128ba38 R08: 0000000000001000 R09: ffffc0bd4128b948 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000240 R13: ffffa0a4b556fb60 R14: ffffa0a4ab8f0af0 R15: ffffa0a4ab8f0af0 FS: 0000000000000000(0000) GS:ffffa0a4b7a00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f2461c80020 CR3: 000000022b32a006 CR4: 00000000000206f0 Call Trace: ? _cond_resched+0x1a/0x50 push_leaf_left+0x179/0x190 btrfs_del_items+0x316/0x470 btrfs_del_csums+0x215/0x3a0 __btrfs_free_extent.isra.72+0x5a7/0xbe0 __btrfs_run_delayed_refs+0x539/0x1120 btrfs_run_delayed_refs+0xdb/0x1b0 btrfs_commit_transaction+0x52/0x950 ? start_transaction+0x94/0x450 transaction_kthread+0x163/0x190 kthread+0x105/0x140 ? btrfs_cleanup_transaction+0x560/0x560 ? kthread_destroy_worker+0x50/0x50 ret_from_fork+0x35/0x40 Modules linked in: ---[ end trace c2425e6e89b5558f ]--- [CAUSE] The offending csum tree looks like this: checksum tree key (CSUM_TREE ROOT_ITEM 0) node 29741056 level 1 items 14 free 107 generation 19 owner CSUM_TREE ... key (EXTENT_CSUM EXTENT_CSUM 85975040) block 29630464 gen 17 key (EXTENT_CSUM EXTENT_CSUM 89911296) block 29642752 gen 17 <<< key (EXTENT_CSUM EXTENT_CSUM 92274688) block 29646848 gen 17 ... 
leaf 29630464 items 6 free space 1 generation 17 owner CSUM_TREE item 0 key (EXTENT_CSUM EXTENT_CSUM 85975040) itemoff 3987 itemsize 8 range start 85975040 end 85983232 length 8192 ... leaf 29642752 items 0 free space 3995 generation 17 owner 0 ^ empty leaf invalid owner ^ leaf 29646848 items 1 free space 602 generation 17 owner CSUM_TREE item 0 key (EXTENT_CSUM EXTENT_CSUM 92274688) itemoff 627 itemsize 3368 range start 92274688 end 95723520 length 3448832 So we have a corrupted csum tree where one tree leaf is completely empty, causing unbalanced btree, thus leading to unexpected btree balance error. [FIX] For this particular case, we handle it in two directions to catch it: - Check if the tree block is empty through btrfs_verify_level_key() So that invalid tree blocks won't be read out through btrfs_search_slot() and its variants. - Check 0 tree owner in tree checker NO tree is using 0 as its tree owner, detect it and reject at tree block read time. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=202821 Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 10 ++++++++++ fs/btrfs/tree-checker.c | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 99dfd889b9f7..044981cf6df9 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -417,6 +417,16 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level, */ if (btrfs_header_generation(eb) > fs_info->last_trans_committed) return 0; + + /* We have @first_key, so this @eb must have at least one item */ + if (btrfs_header_nritems(eb) == 0) { + btrfs_err(fs_info, + "invalid tree nritems, bytenr=%llu nritems=0 expect >0", + eb->start); + WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); + return -EUCLEAN; + } + if (found_level) btrfs_node_key_to_cpu(eb, &found_key, 0); else diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 9645389a1187..43e488f5d063 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1325,6 +1325,12 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data) owner); return -EUCLEAN; } + /* Unknown tree */ + if (owner == 0) { + generic_err(leaf, 0, + "invalid owner, root 0 is not defined"); + return -EUCLEAN; + } return 0; } From 3acd48507dc43eeeb0a1fe965b8bad91cab904a7 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 21 Aug 2019 15:05:55 +0000 Subject: [PATCH 103/138] btrfs: fix allocation of free space cache v1 bitmap pages Various notifications of type "BUG kmalloc-4096 () : Redzone overwritten" have been observed recently in various parts of the kernel. After some time, it has been made a relation with the use of BTRFS filesystem and with SLUB_DEBUG turned on. [ 22.809700] BUG kmalloc-4096 (Tainted: G W ): Redzone overwritten [ 22.810286] INFO: 0xbe1a5921-0xfbfc06cd. 
First byte 0x0 instead of 0xcc [ 22.810866] INFO: Allocated in __load_free_space_cache+0x588/0x780 [btrfs] age=22 cpu=0 pid=224 [ 22.811193] __slab_alloc.constprop.26+0x44/0x70 [ 22.811345] kmem_cache_alloc_trace+0xf0/0x2ec [ 22.811588] __load_free_space_cache+0x588/0x780 [btrfs] [ 22.811848] load_free_space_cache+0xf4/0x1b0 [btrfs] [ 22.812090] cache_block_group+0x1d0/0x3d0 [btrfs] [ 22.812321] find_free_extent+0x680/0x12a4 [btrfs] [ 22.812549] btrfs_reserve_extent+0xec/0x220 [btrfs] [ 22.812785] btrfs_alloc_tree_block+0x178/0x5f4 [btrfs] [ 22.813032] __btrfs_cow_block+0x150/0x5d4 [btrfs] [ 22.813262] btrfs_cow_block+0x194/0x298 [btrfs] [ 22.813484] commit_cowonly_roots+0x44/0x294 [btrfs] [ 22.813718] btrfs_commit_transaction+0x63c/0xc0c [btrfs] [ 22.813973] close_ctree+0xf8/0x2a4 [btrfs] [ 22.814107] generic_shutdown_super+0x80/0x110 [ 22.814250] kill_anon_super+0x18/0x30 [ 22.814437] btrfs_kill_super+0x18/0x90 [btrfs] [ 22.814590] INFO: Freed in proc_cgroup_show+0xc0/0x248 age=41 cpu=0 pid=83 [ 22.814841] proc_cgroup_show+0xc0/0x248 [ 22.814967] proc_single_show+0x54/0x98 [ 22.815086] seq_read+0x278/0x45c [ 22.815190] __vfs_read+0x28/0x17c [ 22.815289] vfs_read+0xa8/0x14c [ 22.815381] ksys_read+0x50/0x94 [ 22.815475] ret_from_syscall+0x0/0x38 Commit 69d2480456d1 ("btrfs: use copy_page for copying pages instead of memcpy") changed the way bitmap blocks are copied. But although bitmaps have the size of a page, they were allocated with kzalloc(). Most of the time, kzalloc() allocates aligned blocks of memory, so copy_page() can be used. But when some debug options like SLAB_DEBUG are activated, kzalloc() may return an unaligned pointer. On powerpc, memcpy(), copy_page() and other copying functions use the 'dcbz' instruction, which provides an entire zeroed cacheline to avoid a memory read when the intention is to overwrite a full line. Functions like memcpy() are written to care about partial cachelines at the start and end of the destination, but copy_page() assumes it gets pages. As pages are naturally cache aligned, copy_page() doesn't care about partial lines. This means that when copy_page() is called with a misaligned pointer, a few leading bytes are zeroed. To fix it, allocate bitmaps through kmem_cache instead of using kzalloc(). The cache pool is created with a PAGE_SIZE alignment constraint. Reported-by: Erhard F.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204371 Fixes: 69d2480456d1 ("btrfs: use copy_page for copying pages instead of memcpy") Cc: stable@vger.kernel.org # 4.19+ Signed-off-by: Christophe Leroy Reviewed-by: David Sterba [ rename to btrfs_free_space_bitmap ] Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/free-space-cache.c | 20 +++++++++++++------- fs/btrfs/inode.c | 8 ++++++++ 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d27b39858339..ef40fffb5e46 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -43,6 +43,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; extern struct kmem_cache *btrfs_free_space_cachep; +extern struct kmem_cache *btrfs_free_space_bitmap_cachep; struct btrfs_ordered_sum; struct btrfs_ref; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 265dc75f7a7a..ab806d82fe12 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -765,7 +765,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } else { ASSERT(num_bitmaps); num_bitmaps--; - e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); + e->bitmap = kmem_cache_zalloc( + btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!e->bitmap) { kmem_cache_free( btrfs_free_space_cachep, e); @@ -1882,7 +1883,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { unlink_free_space(ctl, bitmap_info); - kfree(bitmap_info->bitmap); + kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); kmem_cache_free(btrfs_free_space_cachep, bitmap_info); ctl->total_bitmaps--; ctl->op->recalc_thresholds(ctl); @@ -2136,7 +2137,8 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, } /* allocate the bitmap */ - info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); + info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, + GFP_NOFS); spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; @@ -2147,7 +2149,9 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, out: if (info) { - kfree(info->bitmap); + if (info->bitmap) + kmem_cache_free(btrfs_free_space_bitmap_cachep, + info->bitmap); kmem_cache_free(btrfs_free_space_cachep, info); } @@ -2811,7 +2815,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, if (entry->bytes == 0) { ctl->free_extents--; if (entry->bitmap) { - kfree(entry->bitmap); + kmem_cache_free(btrfs_free_space_bitmap_cachep, + entry->bitmap); ctl->total_bitmaps--; ctl->op->recalc_thresholds(ctl); } @@ -3615,7 +3620,7 @@ int test_add_free_space_entry(struct btrfs_block_group_cache *cache, } if (!map) { - map = kzalloc(PAGE_SIZE, GFP_NOFS); + map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS); if (!map) { kmem_cache_free(btrfs_free_space_cachep, info); return -ENOMEM; @@ -3644,7 +3649,8 @@ int test_add_free_space_entry(struct btrfs_block_group_cache *cache, if (info) kmem_cache_free(btrfs_free_space_cachep, info); - kfree(map); + if (map) + kmem_cache_free(btrfs_free_space_bitmap_cachep, map); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b52282df8c4d..d79ad5abd06e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -74,6 +74,7 @@ static struct kmem_cache *btrfs_inode_cachep; struct kmem_cache *btrfs_trans_handle_cachep; struct kmem_cache *btrfs_path_cachep; struct kmem_cache *btrfs_free_space_cachep; +struct kmem_cache 
*btrfs_free_space_bitmap_cachep; static int btrfs_setsize(struct inode *inode, struct iattr *attr); static int btrfs_truncate(struct inode *inode, bool skip_writeback); @@ -9409,6 +9410,7 @@ void __cold btrfs_destroy_cachep(void) kmem_cache_destroy(btrfs_trans_handle_cachep); kmem_cache_destroy(btrfs_path_cachep); kmem_cache_destroy(btrfs_free_space_cachep); + kmem_cache_destroy(btrfs_free_space_bitmap_cachep); } int __init btrfs_init_cachep(void) @@ -9438,6 +9440,12 @@ int __init btrfs_init_cachep(void) if (!btrfs_free_space_cachep) goto fail; + btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap", + PAGE_SIZE, PAGE_SIZE, + SLAB_RED_ZONE, NULL); + if (!btrfs_free_space_bitmap_cachep) + goto fail; + return 0; fail: btrfs_destroy_cachep(); From a06dee4d7eb67c35e7b7da0b84216424d60a87d3 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Tue, 27 Aug 2019 15:40:44 +0800 Subject: [PATCH 104/138] btrfs: proper error handling when invalid device is found in find_next_devid In a corrupted tree, if search for next devid finds the device with devid = -1, then report the error -EUCLEAN back to the parent function to fail gracefully. The tree checker will not catch this in case the devids are created using the following script: umount /btrfs dev1=/dev/sdb dev2=/dev/sdc mkfs.btrfs -fq -dsingle -msingle $dev1 mount $dev1 /btrfs _fail() { echo $1 exit 1 } while true; do btrfs dev add -f $dev2 /btrfs || _fail "add failed" btrfs dev del $dev1 /btrfs || _fail "del failed" dev_tmp=$dev1 dev1=$dev2 dev2=$dev_tmp done With output: BTRFS critical (device sdb): corrupt leaf: root=3 block=313739198464 slot=1 devid=1 invalid devid: has=507 expect=[0, 506] BTRFS error (device sdb): block=313739198464 write time tree block corruption detected BTRFS: error (device sdb) in btrfs_commit_transaction:2268: errno=-5 IO failure (Error while writing out transaction) BTRFS warning (device sdb): Skipping commit of aborted transaction. BTRFS: error (device sdb) in cleanup_transaction:1827: errno=-5 IO failure Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Reviewed-by: Nikolay Borisov Signed-off-by: Anand Jain [ add script and messages ] Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e2de7c7b674a..c7a08fe26672 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1849,7 +1849,12 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info, if (ret < 0) goto error; - BUG_ON(ret == 0); /* Corruption */ + if (ret == 0) { + /* Corruption */ + btrfs_err(fs_info, "corrupted chunk tree devid -1 matched"); + ret = -EUCLEAN; + goto error; + } ret = btrfs_previous_item(fs_info->chunk_root, path, BTRFS_DEV_ITEMS_OBJECTID, From d2979aa25fc8de6498d906ae6b46b028457d6400 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Tue, 27 Aug 2019 15:40:45 +0800 Subject: [PATCH 105/138] btrfs: use proper error values on allocation failure in clone_fs_devices Fix the fake ENOMEM return error code to the actual error in clone_fs_devices(). 
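As an illustration of the idiom this fix restores, here is a self-contained sketch (the ERR_PTR/IS_ERR/PTR_ERR stand-ins are defined locally since in kernel code they come from <linux/err.h>, and every other name is hypothetical): the loop remembers the real error in 'ret', and the error label returns ERR_PTR(ret) rather than a hard-coded ERR_PTR(-ENOMEM).

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Local stand-ins for the kernel's <linux/err.h> helpers */
  #define ERR_PTR(err)  ((void *)(long)(err))
  #define PTR_ERR(ptr)  ((long)(ptr))
  #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

  static void *dup_item(int i)
  {
          if (i == 2)
                  return ERR_PTR(-EINVAL); /* a failure that is not -ENOMEM */
          return malloc(16);
  }

  static void *clone_items(void)
  {
          void *copy = malloc(64);
          int ret = 0;

          if (!copy)
                  return ERR_PTR(-ENOMEM);

          for (int i = 0; i < 4; i++) {
                  void *dup = dup_item(i);

                  if (IS_ERR(dup)) {
                          ret = PTR_ERR(dup); /* keep the real error */
                          goto error;
                  }
                  free(dup); /* a real clone would link it into 'copy' */
          }
          return copy;
  error:
          free(copy);
          return ERR_PTR(ret); /* previously: always ERR_PTR(-ENOMEM) */
  }

  int main(void)
  {
          void *p = clone_items();

          if (IS_ERR(p))
                  printf("clone failed: %ld\n", PTR_ERR(p));
          else
                  free(p);
          return 0;
  }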
Reviewed-by: Johannes Thumshirn Reviewed-by: Nikolay Borisov Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7a08fe26672..8bfc41f1b3b6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1115,6 +1115,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) struct btrfs_fs_devices *fs_devices; struct btrfs_device *device; struct btrfs_device *orig_dev; + int ret = 0; fs_devices = alloc_fs_devices(orig->fsid, NULL); if (IS_ERR(fs_devices)) @@ -1128,8 +1129,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) device = btrfs_alloc_device(NULL, &orig_dev->devid, orig_dev->uuid); - if (IS_ERR(device)) + if (IS_ERR(device)) { + ret = PTR_ERR(device); goto error; + } /* * This is ok to do without rcu read locked because we hold the @@ -1140,6 +1143,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) GFP_KERNEL); if (!name) { btrfs_free_device(device); + ret = -ENOMEM; goto error; } rcu_assign_pointer(device->name, name); @@ -1154,7 +1158,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) error: mutex_unlock(&orig->device_list_mutex); free_fs_devices(fs_devices); - return ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } /* From 602cbe91fb012a923a9fea880e600e004eb1543b Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 18:48:25 +0200 Subject: [PATCH 106/138] btrfs: move cond_wake_up functions out of ctree The file ctree.h serves as a header for everything and has become quite bloated. Split some helpers that are generic and create a new file that should be the catch-all for code that's not btrfs-specific. Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/compression.c | 1 + fs/btrfs/ctree.h | 22 ---------------------- fs/btrfs/delayed-inode.c | 1 + fs/btrfs/dev-replace.c | 1 + fs/btrfs/extent-tree.c | 1 + fs/btrfs/inode.c | 1 + fs/btrfs/locking.c | 1 + fs/btrfs/misc.h | 33 +++++++++++++++++++++++++++++++++ fs/btrfs/ordered-data.c | 1 + fs/btrfs/transaction.c | 1 + fs/btrfs/tree-log.c | 1 + fs/btrfs/zstd.c | 1 + 12 files changed, 43 insertions(+), 22 deletions(-) create mode 100644 fs/btrfs/misc.h diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index fe7a8b5ff96c..b05b361e2062 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -18,6 +18,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ef40fffb5e46..5cb410cc1502 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3517,26 +3517,4 @@ static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info) } #endif -static inline void cond_wake_up(struct wait_queue_head *wq) -{ - /* - * This implies a full smp_mb barrier, see comments for - * waitqueue_active why. - */ - if (wq_has_sleeper(wq)) - wake_up(wq); -} - -static inline void cond_wake_up_nomb(struct wait_queue_head *wq) -{ - /* - * Special case for conditional wakeup where the barrier required for - * waitqueue_active is implied by some of the preceding code. Eg. one - * of such atomic operations (atomic_dec_and_return, ...), or a - * unlock/lock sequence, etc. 
- */ - if (waitqueue_active(wq)) - wake_up(wq); -} - #endif diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 9318cf761a07..1f7f39b10bd0 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -6,6 +6,7 @@ #include #include +#include "misc.h" #include "delayed-inode.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 00ea828beb00..48890826b5e6 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -9,6 +9,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "extent_map.h" #include "disk-io.h" diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index af7631472073..795b592e5269 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -16,6 +16,7 @@ #include #include #include +#include "misc.h" #include "tree-log.h" #include "disk-io.h" #include "print-tree.h" diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d79ad5abd06e..07f77c7e6b22 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -30,6 +30,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index e4309bcf0b5f..7f9a578a1a20 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -8,6 +8,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "extent_io.h" #include "locking.h" diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h new file mode 100644 index 000000000000..ef3901238ddd --- /dev/null +++ b/fs/btrfs/misc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef BTRFS_MISC_H +#define BTRFS_MISC_H + +#include +#include + +#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len)) + +static inline void cond_wake_up(struct wait_queue_head *wq) +{ + /* + * This implies a full smp_mb barrier, see comments for + * waitqueue_active why. + */ + if (wq_has_sleeper(wq)) + wake_up(wq); +} + +static inline void cond_wake_up_nomb(struct wait_queue_head *wq) +{ + /* + * Special case for conditional wakeup where the barrier required for + * waitqueue_active is implied by some of the preceding code. Eg. one + * of such atomic operations (atomic_dec_and_return, ...), or a + * unlock/lock sequence, etc. 
+ */ + if (waitqueue_active(wq)) + wake_up(wq); +} + +#endif diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index ae7f64a8facb..24b6c72b9a59 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -7,6 +7,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f21416d68c2c..8624bdee8c5b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -10,6 +10,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 19a4b9dc669f..34d087008b72 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -8,6 +8,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "tree-log.h" #include "disk-io.h" diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c index 0af4a5cd4313..764d47b107e5 100644 --- a/fs/btrfs/zstd.c +++ b/fs/btrfs/zstd.c @@ -17,6 +17,7 @@ #include #include #include +#include "misc.h" #include "compression.h" #include "ctree.h" From 784352fe0bb4814bd969fb390c3d03486a5aaafa Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 18:54:28 +0200 Subject: [PATCH 107/138] btrfs: move math functions to misc.h Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 2 +- fs/btrfs/block-rsv.c | 2 +- fs/btrfs/extent-tree.c | 1 - fs/btrfs/math.h | 28 ---------------------------- fs/btrfs/misc.h | 17 +++++++++++++++++ fs/btrfs/space-info.c | 2 +- fs/btrfs/volumes.c | 2 +- 7 files changed, 21 insertions(+), 33 deletions(-) delete mode 100644 fs/btrfs/math.h diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index c912ee26e85d..9a09f459337b 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include "misc.h" #include "ctree.h" #include "block-group.h" #include "space-info.h" @@ -13,7 +14,6 @@ #include "sysfs.h" #include "tree-log.h" #include "delalloc-space.h" -#include "math.h" /* * Return target flags in extended format or 0 if restripe for this chunk_type diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index 698470b9f32d..ef8b8ae27386 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 +#include "misc.h" #include "ctree.h" #include "block-rsv.h" #include "space-info.h" -#include "math.h" #include "transaction.h" static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 795b592e5269..2bf5dad82bf1 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -25,7 +25,6 @@ #include "locking.h" #include "free-space-cache.h" #include "free-space-tree.h" -#include "math.h" #include "sysfs.h" #include "qgroup.h" #include "ref-verify.h" diff --git a/fs/btrfs/math.h b/fs/btrfs/math.h deleted file mode 100644 index 75246f2f56ba..000000000000 --- a/fs/btrfs/math.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2012 Fujitsu. All rights reserved. 
- * Written by Miao Xie - */ - -#ifndef BTRFS_MATH_H -#define BTRFS_MATH_H - -#include - -static inline u64 div_factor(u64 num, int factor) -{ - if (factor == 10) - return num; - num *= factor; - return div_u64(num, 10); -} - -static inline u64 div_factor_fine(u64 num, int factor) -{ - if (factor == 100) - return num; - num *= factor; - return div_u64(num, 100); -} - -#endif diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h index ef3901238ddd..7d564924dfeb 100644 --- a/fs/btrfs/misc.h +++ b/fs/btrfs/misc.h @@ -5,6 +5,7 @@ #include #include +#include #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len)) @@ -30,4 +31,20 @@ static inline void cond_wake_up_nomb(struct wait_queue_head *wq) wake_up(wq); } +static inline u64 div_factor(u64 num, int factor) +{ + if (factor == 10) + return num; + num *= factor; + return div_u64(num, 10); +} + +static inline u64 div_factor_fine(u64 num, int factor) +{ + if (factor == 100) + return num; + num *= factor; + return div_u64(num, 100); +} + #endif diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 13a4326c8821..bea7ae0a9739 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include "misc.h" #include "ctree.h" #include "space-info.h" #include "sysfs.h" @@ -7,7 +8,6 @@ #include "free-space-cache.h" #include "ordered-data.h" #include "transaction.h" -#include "math.h" #include "block-group.h" u64 btrfs_space_info_used(struct btrfs_space_info *s_info, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8bfc41f1b3b6..02976c174f32 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -14,6 +14,7 @@ #include #include #include +#include "misc.h" #include "ctree.h" #include "extent_map.h" #include "disk-io.h" @@ -24,7 +25,6 @@ #include "async-thread.h" #include "check-integrity.h" #include "rcu-string.h" -#include "math.h" #include "dev-replace.h" #include "sysfs.h" #include "tree-checker.h" From 8a953348afdd75f45d75e5ff489876fe88f3731d Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 19:06:17 +0200 Subject: [PATCH 108/138] btrfs: move private raid56 definitions from ctree.h Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 16 ---------------- fs/btrfs/raid56.c | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 5cb410cc1502..07c08831d6e7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -471,22 +471,6 @@ enum btrfs_orphan_cleanup_state { ORPHAN_CLEANUP_DONE = 2, }; -/* used by the raid56 code to lock stripes for read/modify/write */ -struct btrfs_stripe_hash { - struct list_head hash_list; - spinlock_t lock; -}; - -/* used by the raid56 code to lock stripes for read/modify/write */ -struct btrfs_stripe_hash_table { - struct list_head stripe_cache; - spinlock_t cache_lock; - int cache_size; - struct btrfs_stripe_hash table[]; -}; - -#define BTRFS_STRIPE_HASH_TABLE_BITS 11 - void btrfs_init_async_reclaim_work(struct work_struct *work); /* fs_info */ diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index f3d0576dd327..57a2ac721985 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -35,6 +35,22 @@ #define RBIO_CACHE_SIZE 1024 +#define BTRFS_STRIPE_HASH_TABLE_BITS 11 + +/* Used by the raid56 code to lock stripes for read/modify/write */ +struct btrfs_stripe_hash { + struct list_head hash_list; + spinlock_t lock; +}; + +/* Used by the raid56 code to lock stripes for read/modify/write */ +struct btrfs_stripe_hash_table { + 
struct list_head stripe_cache; + spinlock_t cache_lock; + int cache_size; + struct btrfs_stripe_hash table[]; +}; + enum btrfs_rbio_ops { BTRFS_RBIO_WRITE, BTRFS_RBIO_READ_REBUILD, From 4b231ae47417d47a6bafab92b452ad629acdacb0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 19:16:27 +0200 Subject: [PATCH 109/138] btrfs: rename and export read_node_slot Preparatory work for code that will be moved out of ctree and uses this function. Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 22 +++++++++++----------- fs/btrfs/ctree.h | 3 +++ 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a2f3cd7a619c..3b585f3e4d11 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1792,8 +1792,8 @@ static void root_sub_used(struct btrfs_root *root, u32 size) /* given a node and slot number, this reads the blocks it points to. The * extent buffer is returned with a reference taken (but unlocked). */ -static noinline struct extent_buffer *read_node_slot( - struct extent_buffer *parent, int slot) +struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, + int slot) { int level = btrfs_header_level(parent); struct extent_buffer *eb; @@ -1862,7 +1862,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, return 0; /* promote the child to a root */ - child = read_node_slot(mid, 0); + child = btrfs_read_node_slot(mid, 0); if (IS_ERR(child)) { ret = PTR_ERR(child); btrfs_handle_fs_error(fs_info, ret, NULL); @@ -1902,7 +1902,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4) return 0; - left = read_node_slot(parent, pslot - 1); + left = btrfs_read_node_slot(parent, pslot - 1); if (IS_ERR(left)) left = NULL; @@ -1917,7 +1917,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, } } - right = read_node_slot(parent, pslot + 1); + right = btrfs_read_node_slot(parent, pslot + 1); if (IS_ERR(right)) right = NULL; @@ -2077,7 +2077,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, if (!parent) return 1; - left = read_node_slot(parent, pslot - 1); + left = btrfs_read_node_slot(parent, pslot - 1); if (IS_ERR(left)) left = NULL; @@ -2129,7 +2129,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, btrfs_tree_unlock(left); free_extent_buffer(left); } - right = read_node_slot(parent, pslot + 1); + right = btrfs_read_node_slot(parent, pslot + 1); if (IS_ERR(right)) right = NULL; @@ -3783,7 +3783,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_assert_tree_locked(path->nodes[1]); - right = read_node_slot(upper, slot + 1); + right = btrfs_read_node_slot(upper, slot + 1); /* * slot + 1 is not valid or we fail to read the right node, * no big deal, just return. @@ -4017,7 +4017,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_assert_tree_locked(path->nodes[1]); - left = read_node_slot(path->nodes[1], slot - 1); + left = btrfs_read_node_slot(path->nodes[1], slot - 1); /* * slot - 1 is not valid or we fail to read the left node, * no big deal, just return. 
@@ -5224,7 +5224,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, goto out; } btrfs_set_path_blocking(path); - cur = read_node_slot(cur, slot); + cur = btrfs_read_node_slot(cur, slot); if (IS_ERR(cur)) { ret = PTR_ERR(cur); goto out; @@ -5251,7 +5251,7 @@ static int tree_move_down(struct btrfs_path *path, int *level) struct extent_buffer *eb; BUG_ON(*level == 0); - eb = read_node_slot(path->nodes[*level], path->slots[*level]); + eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]); if (IS_ERR(eb)) return PTR_ERR(eb); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 07c08831d6e7..dc465df47b32 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2587,6 +2587,9 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, struct btrfs_path *path, u64 min_trans); +struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, + int slot); + enum btrfs_compare_tree_result { BTRFS_COMPARE_TREE_NEW, BTRFS_COMPARE_TREE_DELETED, From 18d0f5c6e16ce762f92ab7879c30ff2e37cd9cef Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 19:12:59 +0200 Subject: [PATCH 110/138] btrfs: move functions for tree compare to send.c Send is the only user of tree_compare, we can move it there along with the other helpers and definitions. Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 362 --------------------------------------------- fs/btrfs/ctree.h | 14 -- fs/btrfs/send.c | 374 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 374 insertions(+), 376 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 3b585f3e4d11..fbf94e28fba8 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -5246,368 +5246,6 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, return ret; } -static int tree_move_down(struct btrfs_path *path, int *level) -{ - struct extent_buffer *eb; - - BUG_ON(*level == 0); - eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]); - if (IS_ERR(eb)) - return PTR_ERR(eb); - - path->nodes[*level - 1] = eb; - path->slots[*level - 1] = 0; - (*level)--; - return 0; -} - -static int tree_move_next_or_upnext(struct btrfs_path *path, - int *level, int root_level) -{ - int ret = 0; - int nritems; - nritems = btrfs_header_nritems(path->nodes[*level]); - - path->slots[*level]++; - - while (path->slots[*level] >= nritems) { - if (*level == root_level) - return -1; - - /* move upnext */ - path->slots[*level] = 0; - free_extent_buffer(path->nodes[*level]); - path->nodes[*level] = NULL; - (*level)++; - path->slots[*level]++; - - nritems = btrfs_header_nritems(path->nodes[*level]); - ret = 1; - } - return ret; -} - -/* - * Returns 1 if it had to move up and next. 0 is returned if it moved only next - * or down. 
- */ -static int tree_advance(struct btrfs_path *path, - int *level, int root_level, - int allow_down, - struct btrfs_key *key) -{ - int ret; - - if (*level == 0 || !allow_down) { - ret = tree_move_next_or_upnext(path, level, root_level); - } else { - ret = tree_move_down(path, level); - } - if (ret >= 0) { - if (*level == 0) - btrfs_item_key_to_cpu(path->nodes[*level], key, - path->slots[*level]); - else - btrfs_node_key_to_cpu(path->nodes[*level], key, - path->slots[*level]); - } - return ret; -} - -static int tree_compare_item(struct btrfs_path *left_path, - struct btrfs_path *right_path, - char *tmp_buf) -{ - int cmp; - int len1, len2; - unsigned long off1, off2; - - len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); - len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); - if (len1 != len2) - return 1; - - off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); - off2 = btrfs_item_ptr_offset(right_path->nodes[0], - right_path->slots[0]); - - read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); - - cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); - if (cmp) - return 1; - return 0; -} - -#define ADVANCE 1 -#define ADVANCE_ONLY_NEXT -1 - -/* - * This function compares two trees and calls the provided callback for - * every changed/new/deleted item it finds. - * If shared tree blocks are encountered, whole subtrees are skipped, making - * the compare pretty fast on snapshotted subvolumes. - * - * This currently works on commit roots only. As commit roots are read only, - * we don't do any locking. The commit roots are protected with transactions. - * Transactions are ended and rejoined when a commit is tried in between. - * - * This function checks for modifications done to the trees while comparing. - * If it detects a change, it aborts immediately. - */ -int btrfs_compare_trees(struct btrfs_root *left_root, - struct btrfs_root *right_root, - btrfs_changed_cb_t changed_cb, void *ctx) -{ - struct btrfs_fs_info *fs_info = left_root->fs_info; - int ret; - int cmp; - struct btrfs_path *left_path = NULL; - struct btrfs_path *right_path = NULL; - struct btrfs_key left_key; - struct btrfs_key right_key; - char *tmp_buf = NULL; - int left_root_level; - int right_root_level; - int left_level; - int right_level; - int left_end_reached; - int right_end_reached; - int advance_left; - int advance_right; - u64 left_blockptr; - u64 right_blockptr; - u64 left_gen; - u64 right_gen; - - left_path = btrfs_alloc_path(); - if (!left_path) { - ret = -ENOMEM; - goto out; - } - right_path = btrfs_alloc_path(); - if (!right_path) { - ret = -ENOMEM; - goto out; - } - - tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); - if (!tmp_buf) { - ret = -ENOMEM; - goto out; - } - - left_path->search_commit_root = 1; - left_path->skip_locking = 1; - right_path->search_commit_root = 1; - right_path->skip_locking = 1; - - /* - * Strategy: Go to the first items of both trees. 
Then do - * - * If both trees are at level 0 - * Compare keys of current items - * If left < right treat left item as new, advance left tree - * and repeat - * If left > right treat right item as deleted, advance right tree - * and repeat - * If left == right do deep compare of items, treat as changed if - * needed, advance both trees and repeat - * If both trees are at the same level but not at level 0 - * Compare keys of current nodes/leafs - * If left < right advance left tree and repeat - * If left > right advance right tree and repeat - * If left == right compare blockptrs of the next nodes/leafs - * If they match advance both trees but stay at the same level - * and repeat - * If they don't match advance both trees while allowing to go - * deeper and repeat - * If tree levels are different - * Advance the tree that needs it and repeat - * - * Advancing a tree means: - * If we are at level 0, try to go to the next slot. If that's not - * possible, go one level up and repeat. Stop when we found a level - * where we could go to the next slot. We may at this point be on a - * node or a leaf. - * - * If we are not at level 0 and not on shared tree blocks, go one - * level deeper. - * - * If we are not at level 0 and on shared tree blocks, go one slot to - * the right if possible or go up and right. - */ - - down_read(&fs_info->commit_root_sem); - left_level = btrfs_header_level(left_root->commit_root); - left_root_level = left_level; - left_path->nodes[left_level] = - btrfs_clone_extent_buffer(left_root->commit_root); - if (!left_path->nodes[left_level]) { - up_read(&fs_info->commit_root_sem); - ret = -ENOMEM; - goto out; - } - - right_level = btrfs_header_level(right_root->commit_root); - right_root_level = right_level; - right_path->nodes[right_level] = - btrfs_clone_extent_buffer(right_root->commit_root); - if (!right_path->nodes[right_level]) { - up_read(&fs_info->commit_root_sem); - ret = -ENOMEM; - goto out; - } - up_read(&fs_info->commit_root_sem); - - if (left_level == 0) - btrfs_item_key_to_cpu(left_path->nodes[left_level], - &left_key, left_path->slots[left_level]); - else - btrfs_node_key_to_cpu(left_path->nodes[left_level], - &left_key, left_path->slots[left_level]); - if (right_level == 0) - btrfs_item_key_to_cpu(right_path->nodes[right_level], - &right_key, right_path->slots[right_level]); - else - btrfs_node_key_to_cpu(right_path->nodes[right_level], - &right_key, right_path->slots[right_level]); - - left_end_reached = right_end_reached = 0; - advance_left = advance_right = 0; - - while (1) { - if (advance_left && !left_end_reached) { - ret = tree_advance(left_path, &left_level, - left_root_level, - advance_left != ADVANCE_ONLY_NEXT, - &left_key); - if (ret == -1) - left_end_reached = ADVANCE; - else if (ret < 0) - goto out; - advance_left = 0; - } - if (advance_right && !right_end_reached) { - ret = tree_advance(right_path, &right_level, - right_root_level, - advance_right != ADVANCE_ONLY_NEXT, - &right_key); - if (ret == -1) - right_end_reached = ADVANCE; - else if (ret < 0) - goto out; - advance_right = 0; - } - - if (left_end_reached && right_end_reached) { - ret = 0; - goto out; - } else if (left_end_reached) { - if (right_level == 0) { - ret = changed_cb(left_path, right_path, - &right_key, - BTRFS_COMPARE_TREE_DELETED, - ctx); - if (ret < 0) - goto out; - } - advance_right = ADVANCE; - continue; - } else if (right_end_reached) { - if (left_level == 0) { - ret = changed_cb(left_path, right_path, - &left_key, - BTRFS_COMPARE_TREE_NEW, - ctx); - if (ret < 0) - goto out; 
- } - advance_left = ADVANCE; - continue; - } - - if (left_level == 0 && right_level == 0) { - cmp = btrfs_comp_cpu_keys(&left_key, &right_key); - if (cmp < 0) { - ret = changed_cb(left_path, right_path, - &left_key, - BTRFS_COMPARE_TREE_NEW, - ctx); - if (ret < 0) - goto out; - advance_left = ADVANCE; - } else if (cmp > 0) { - ret = changed_cb(left_path, right_path, - &right_key, - BTRFS_COMPARE_TREE_DELETED, - ctx); - if (ret < 0) - goto out; - advance_right = ADVANCE; - } else { - enum btrfs_compare_tree_result result; - - WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); - ret = tree_compare_item(left_path, right_path, - tmp_buf); - if (ret) - result = BTRFS_COMPARE_TREE_CHANGED; - else - result = BTRFS_COMPARE_TREE_SAME; - ret = changed_cb(left_path, right_path, - &left_key, result, ctx); - if (ret < 0) - goto out; - advance_left = ADVANCE; - advance_right = ADVANCE; - } - } else if (left_level == right_level) { - cmp = btrfs_comp_cpu_keys(&left_key, &right_key); - if (cmp < 0) { - advance_left = ADVANCE; - } else if (cmp > 0) { - advance_right = ADVANCE; - } else { - left_blockptr = btrfs_node_blockptr( - left_path->nodes[left_level], - left_path->slots[left_level]); - right_blockptr = btrfs_node_blockptr( - right_path->nodes[right_level], - right_path->slots[right_level]); - left_gen = btrfs_node_ptr_generation( - left_path->nodes[left_level], - left_path->slots[left_level]); - right_gen = btrfs_node_ptr_generation( - right_path->nodes[right_level], - right_path->slots[right_level]); - if (left_blockptr == right_blockptr && - left_gen == right_gen) { - /* - * As we're on a shared block, don't - * allow to go deeper. - */ - advance_left = ADVANCE_ONLY_NEXT; - advance_right = ADVANCE_ONLY_NEXT; - } else { - advance_left = ADVANCE; - advance_right = ADVANCE; - } - } - } else if (left_level < right_level) { - advance_right = ADVANCE; - } else { - advance_left = ADVANCE; - } - } - -out: - btrfs_free_path(left_path); - btrfs_free_path(right_path); - kvfree(tmp_buf); - return ret; -} - /* * this is similar to btrfs_next_leaf, but does not try to preserve * and fixup the path. 
It looks for and returns the next key in the diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index dc465df47b32..17cd88521ad2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2590,20 +2590,6 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, int slot); -enum btrfs_compare_tree_result { - BTRFS_COMPARE_TREE_NEW, - BTRFS_COMPARE_TREE_DELETED, - BTRFS_COMPARE_TREE_CHANGED, - BTRFS_COMPARE_TREE_SAME, -}; -typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path, - struct btrfs_path *right_path, - struct btrfs_key *key, - enum btrfs_compare_tree_result result, - void *ctx); -int btrfs_compare_trees(struct btrfs_root *left_root, - struct btrfs_root *right_root, - btrfs_changed_cb_t cb, void *ctx); int btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index c3c0c064c25d..f856d6ca3771 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -260,6 +260,21 @@ struct name_cache_entry { char name[]; }; +#define ADVANCE 1 +#define ADVANCE_ONLY_NEXT -1 + +enum btrfs_compare_tree_result { + BTRFS_COMPARE_TREE_NEW, + BTRFS_COMPARE_TREE_DELETED, + BTRFS_COMPARE_TREE_CHANGED, + BTRFS_COMPARE_TREE_SAME, +}; +typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path, + struct btrfs_path *right_path, + struct btrfs_key *key, + enum btrfs_compare_tree_result result, + void *ctx); + __cold static void inconsistent_snapshot_error(struct send_ctx *sctx, enum btrfs_compare_tree_result result, @@ -6514,6 +6529,365 @@ static int full_send_tree(struct send_ctx *sctx) return ret; } +static int tree_move_down(struct btrfs_path *path, int *level) +{ + struct extent_buffer *eb; + + BUG_ON(*level == 0); + eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]); + if (IS_ERR(eb)) + return PTR_ERR(eb); + + path->nodes[*level - 1] = eb; + path->slots[*level - 1] = 0; + (*level)--; + return 0; +} + +static int tree_move_next_or_upnext(struct btrfs_path *path, + int *level, int root_level) +{ + int ret = 0; + int nritems; + nritems = btrfs_header_nritems(path->nodes[*level]); + + path->slots[*level]++; + + while (path->slots[*level] >= nritems) { + if (*level == root_level) + return -1; + + /* move upnext */ + path->slots[*level] = 0; + free_extent_buffer(path->nodes[*level]); + path->nodes[*level] = NULL; + (*level)++; + path->slots[*level]++; + + nritems = btrfs_header_nritems(path->nodes[*level]); + ret = 1; + } + return ret; +} + +/* + * Returns 1 if it had to move up and next. 0 is returned if it moved only next + * or down. 
+ */ +static int tree_advance(struct btrfs_path *path, + int *level, int root_level, + int allow_down, + struct btrfs_key *key) +{ + int ret; + + if (*level == 0 || !allow_down) { + ret = tree_move_next_or_upnext(path, level, root_level); + } else { + ret = tree_move_down(path, level); + } + if (ret >= 0) { + if (*level == 0) + btrfs_item_key_to_cpu(path->nodes[*level], key, + path->slots[*level]); + else + btrfs_node_key_to_cpu(path->nodes[*level], key, + path->slots[*level]); + } + return ret; +} + +static int tree_compare_item(struct btrfs_path *left_path, + struct btrfs_path *right_path, + char *tmp_buf) +{ + int cmp; + int len1, len2; + unsigned long off1, off2; + + len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); + len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); + if (len1 != len2) + return 1; + + off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); + off2 = btrfs_item_ptr_offset(right_path->nodes[0], + right_path->slots[0]); + + read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); + + cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); + if (cmp) + return 1; + return 0; +} + +/* + * This function compares two trees and calls the provided callback for + * every changed/new/deleted item it finds. + * If shared tree blocks are encountered, whole subtrees are skipped, making + * the compare pretty fast on snapshotted subvolumes. + * + * This currently works on commit roots only. As commit roots are read only, + * we don't do any locking. The commit roots are protected with transactions. + * Transactions are ended and rejoined when a commit is tried in between. + * + * This function checks for modifications done to the trees while comparing. + * If it detects a change, it aborts immediately. + */ +static int btrfs_compare_trees(struct btrfs_root *left_root, + struct btrfs_root *right_root, + btrfs_changed_cb_t changed_cb, void *ctx) +{ + struct btrfs_fs_info *fs_info = left_root->fs_info; + int ret; + int cmp; + struct btrfs_path *left_path = NULL; + struct btrfs_path *right_path = NULL; + struct btrfs_key left_key; + struct btrfs_key right_key; + char *tmp_buf = NULL; + int left_root_level; + int right_root_level; + int left_level; + int right_level; + int left_end_reached; + int right_end_reached; + int advance_left; + int advance_right; + u64 left_blockptr; + u64 right_blockptr; + u64 left_gen; + u64 right_gen; + + left_path = btrfs_alloc_path(); + if (!left_path) { + ret = -ENOMEM; + goto out; + } + right_path = btrfs_alloc_path(); + if (!right_path) { + ret = -ENOMEM; + goto out; + } + + tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto out; + } + + left_path->search_commit_root = 1; + left_path->skip_locking = 1; + right_path->search_commit_root = 1; + right_path->skip_locking = 1; + + /* + * Strategy: Go to the first items of both trees. 
Then do + * + * If both trees are at level 0 + * Compare keys of current items + * If left < right treat left item as new, advance left tree + * and repeat + * If left > right treat right item as deleted, advance right tree + * and repeat + * If left == right do deep compare of items, treat as changed if + * needed, advance both trees and repeat + * If both trees are at the same level but not at level 0 + * Compare keys of current nodes/leafs + * If left < right advance left tree and repeat + * If left > right advance right tree and repeat + * If left == right compare blockptrs of the next nodes/leafs + * If they match advance both trees but stay at the same level + * and repeat + * If they don't match advance both trees while allowing to go + * deeper and repeat + * If tree levels are different + * Advance the tree that needs it and repeat + * + * Advancing a tree means: + * If we are at level 0, try to go to the next slot. If that's not + * possible, go one level up and repeat. Stop when we found a level + * where we could go to the next slot. We may at this point be on a + * node or a leaf. + * + * If we are not at level 0 and not on shared tree blocks, go one + * level deeper. + * + * If we are not at level 0 and on shared tree blocks, go one slot to + * the right if possible or go up and right. + */ + + down_read(&fs_info->commit_root_sem); + left_level = btrfs_header_level(left_root->commit_root); + left_root_level = left_level; + left_path->nodes[left_level] = + btrfs_clone_extent_buffer(left_root->commit_root); + if (!left_path->nodes[left_level]) { + up_read(&fs_info->commit_root_sem); + ret = -ENOMEM; + goto out; + } + + right_level = btrfs_header_level(right_root->commit_root); + right_root_level = right_level; + right_path->nodes[right_level] = + btrfs_clone_extent_buffer(right_root->commit_root); + if (!right_path->nodes[right_level]) { + up_read(&fs_info->commit_root_sem); + ret = -ENOMEM; + goto out; + } + up_read(&fs_info->commit_root_sem); + + if (left_level == 0) + btrfs_item_key_to_cpu(left_path->nodes[left_level], + &left_key, left_path->slots[left_level]); + else + btrfs_node_key_to_cpu(left_path->nodes[left_level], + &left_key, left_path->slots[left_level]); + if (right_level == 0) + btrfs_item_key_to_cpu(right_path->nodes[right_level], + &right_key, right_path->slots[right_level]); + else + btrfs_node_key_to_cpu(right_path->nodes[right_level], + &right_key, right_path->slots[right_level]); + + left_end_reached = right_end_reached = 0; + advance_left = advance_right = 0; + + while (1) { + if (advance_left && !left_end_reached) { + ret = tree_advance(left_path, &left_level, + left_root_level, + advance_left != ADVANCE_ONLY_NEXT, + &left_key); + if (ret == -1) + left_end_reached = ADVANCE; + else if (ret < 0) + goto out; + advance_left = 0; + } + if (advance_right && !right_end_reached) { + ret = tree_advance(right_path, &right_level, + right_root_level, + advance_right != ADVANCE_ONLY_NEXT, + &right_key); + if (ret == -1) + right_end_reached = ADVANCE; + else if (ret < 0) + goto out; + advance_right = 0; + } + + if (left_end_reached && right_end_reached) { + ret = 0; + goto out; + } else if (left_end_reached) { + if (right_level == 0) { + ret = changed_cb(left_path, right_path, + &right_key, + BTRFS_COMPARE_TREE_DELETED, + ctx); + if (ret < 0) + goto out; + } + advance_right = ADVANCE; + continue; + } else if (right_end_reached) { + if (left_level == 0) { + ret = changed_cb(left_path, right_path, + &left_key, + BTRFS_COMPARE_TREE_NEW, + ctx); + if (ret < 0) + goto out; 
+ } + advance_left = ADVANCE; + continue; + } + + if (left_level == 0 && right_level == 0) { + cmp = btrfs_comp_cpu_keys(&left_key, &right_key); + if (cmp < 0) { + ret = changed_cb(left_path, right_path, + &left_key, + BTRFS_COMPARE_TREE_NEW, + ctx); + if (ret < 0) + goto out; + advance_left = ADVANCE; + } else if (cmp > 0) { + ret = changed_cb(left_path, right_path, + &right_key, + BTRFS_COMPARE_TREE_DELETED, + ctx); + if (ret < 0) + goto out; + advance_right = ADVANCE; + } else { + enum btrfs_compare_tree_result result; + + WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); + ret = tree_compare_item(left_path, right_path, + tmp_buf); + if (ret) + result = BTRFS_COMPARE_TREE_CHANGED; + else + result = BTRFS_COMPARE_TREE_SAME; + ret = changed_cb(left_path, right_path, + &left_key, result, ctx); + if (ret < 0) + goto out; + advance_left = ADVANCE; + advance_right = ADVANCE; + } + } else if (left_level == right_level) { + cmp = btrfs_comp_cpu_keys(&left_key, &right_key); + if (cmp < 0) { + advance_left = ADVANCE; + } else if (cmp > 0) { + advance_right = ADVANCE; + } else { + left_blockptr = btrfs_node_blockptr( + left_path->nodes[left_level], + left_path->slots[left_level]); + right_blockptr = btrfs_node_blockptr( + right_path->nodes[right_level], + right_path->slots[right_level]); + left_gen = btrfs_node_ptr_generation( + left_path->nodes[left_level], + left_path->slots[left_level]); + right_gen = btrfs_node_ptr_generation( + right_path->nodes[right_level], + right_path->slots[right_level]); + if (left_blockptr == right_blockptr && + left_gen == right_gen) { + /* + * As we're on a shared block, don't + * allow to go deeper. + */ + advance_left = ADVANCE_ONLY_NEXT; + advance_right = ADVANCE_ONLY_NEXT; + } else { + advance_left = ADVANCE; + advance_right = ADVANCE; + } + } + } else if (left_level < right_level) { + advance_right = ADVANCE; + } else { + advance_left = ADVANCE; + } + } + +out: + btrfs_free_path(left_path); + btrfs_free_path(right_path); + kvfree(tmp_buf); + return ret; +} + static int send_subvol(struct send_ctx *sctx) { int ret; From 67b61aefcef3842a360e6c603860a785fd971c7a Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 19:57:04 +0200 Subject: [PATCH 111/138] btrfs: move struct io_ctl to free-space-cache.h The io_ctl structure is used for free space management, and used only by the v1 space cache code, but unfortunatlly the full definition is required by block-group.h so it can't be moved to free-space-cache.c without additional changes. 
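For illustration only (a generic C sketch, not code from these headers): the full
definition of a structure is typically needed when another structure embeds it by
value, while a forward declaration is enough when only a pointer is stored. The
struct names by_pointer/by_value below are made up for the example:

    struct btrfs_io_ctl;                    /* forward declaration only */

    struct by_pointer {
            struct btrfs_io_ctl *io_ctl;    /* fine, size not needed */
    };

    struct by_value {
            struct btrfs_io_ctl io_ctl;     /* needs the complete type, so the
                                               defining header must be included */
    };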
Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/block-group.h | 2 ++ fs/btrfs/ctree.h | 14 -------------- fs/btrfs/free-space-cache.h | 14 +++++++++++++- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h index 5c6e2fb23e35..c391800388dd 100644 --- a/fs/btrfs/block-group.h +++ b/fs/btrfs/block-group.h @@ -3,6 +3,8 @@ #ifndef BTRFS_BLOCK_GROUP_H #define BTRFS_BLOCK_GROUP_H +#include "free-space-cache.h" + enum btrfs_disk_cache_state { BTRFS_DC_WRITTEN, BTRFS_DC_ERROR, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 17cd88521ad2..0b6eca746fd4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -434,20 +434,6 @@ enum btrfs_caching_type { BTRFS_CACHE_ERROR, }; -struct btrfs_io_ctl { - void *cur, *orig; - struct page *page; - struct page **pages; - struct btrfs_fs_info *fs_info; - struct inode *inode; - unsigned long size; - int index; - int num_pages; - int entries; - int bitmaps; - unsigned check_crcs:1; -}; - /* * Tree to record all locked full stripes of a RAID5/6 block group */ diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 2205a4113ef3..39c32c8fc24f 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -36,7 +36,19 @@ struct btrfs_free_space_op { struct btrfs_free_space *info); }; -struct btrfs_io_ctl; +struct btrfs_io_ctl { + void *cur, *orig; + struct page *page; + struct page **pages; + struct btrfs_fs_info *fs_info; + struct inode *inode; + unsigned long size; + int index; + int num_pages; + int entries; + int bitmaps; + unsigned check_crcs:1; +}; struct inode *lookup_free_space_inode( struct btrfs_block_group_cache *block_group, From 1dc990dfd31096176788312d39e1275645901fcb Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 21 Aug 2019 20:05:32 +0200 Subject: [PATCH 112/138] btrfs: move dev_stats helpers to volumes.c The other dev stats functions are already there and the helpers are not used by anything else. 
Reviewed-by: Johannes Thumshirn Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 24 ------------------------ fs/btrfs/volumes.c | 23 +++++++++++++++++++++++ 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0b6eca746fd4..ba34f7b435a2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2258,30 +2258,6 @@ static inline u32 btrfs_file_extent_inline_item_len( return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; } -/* btrfs_dev_stats_item */ -static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb, - const struct btrfs_dev_stats_item *ptr, - int index) -{ - u64 val; - - read_extent_buffer(eb, &val, - offsetof(struct btrfs_dev_stats_item, values) + - ((unsigned long)ptr) + (index * sizeof(u64)), - sizeof(val)); - return val; -} - -static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, - struct btrfs_dev_stats_item *ptr, - int index, u64 val) -{ - write_extent_buffer(eb, &val, - offsetof(struct btrfs_dev_stats_item, values) + - ((unsigned long)ptr) + (index * sizeof(u64)), - sizeof(val)); -} - /* btrfs_qgroup_status_item */ BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, generation, 64); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 02976c174f32..a324480bc88b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7285,6 +7285,29 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) } } +static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, + const struct btrfs_dev_stats_item *ptr, + int index) +{ + u64 val; + + read_extent_buffer(eb, &val, + offsetof(struct btrfs_dev_stats_item, values) + + ((unsigned long)ptr) + (index * sizeof(u64)), + sizeof(val)); + return val; +} + +static void btrfs_set_dev_stats_value(struct extent_buffer *eb, + struct btrfs_dev_stats_item *ptr, + int index, u64 val) +{ + write_extent_buffer(eb, &val, + offsetof(struct btrfs_dev_stats_item, values) + + ((unsigned long)ptr) + (index * sizeof(u64)), + sizeof(val)); +} + int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) { struct btrfs_key key; From 9bb8407f54f63242d822d6c57f4edb7d1ae2b901 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 27 Aug 2019 14:46:28 +0300 Subject: [PATCH 113/138] btrfs: Make btrfs_find_name_in_backref return btrfs_inode_ref struct btrfs_find_name_in_backref returns either 0/1 depending on whether it found a backref for the given name. If it returns true then the actual inode_ref struct is returned in one of its parameters. That's pointless, instead refactor the function such that it returns either a pointer to the btrfs_inode_ref or NULL it it didn't find anything. This streamlines the function calling convention. 
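For illustration, a hypothetical caller under the new convention (leaf, slot, name
and name_len are made-up locals, not taken from the patch):

    struct btrfs_inode_ref *ref;

    ref = btrfs_find_name_in_backref(leaf, slot, name, name_len);
    if (!ref)
            return -ENOENT;         /* no backref with that name */
    /* use ref directly; the out-parameter is gone */

Callers that only care whether the backref exists can use
!!btrfs_find_name_in_backref(...), as the tree-log.c hunks below do.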
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 6 +++--- fs/btrfs/inode-item.c | 29 ++++++++++++++--------------- fs/btrfs/tree-log.c | 8 ++++---- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ba34f7b435a2..31ed47d195c2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2800,9 +2800,9 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, u64 inode_objectid, u64 ref_objectid, int ins_len, int cow); -int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot, - const char *name, - int name_len, struct btrfs_inode_ref **ref_ret); +struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, + int slot, const char *name, + int name_len); int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, u64 ref_objectid, const char *name, int name_len, diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 30d62ef918b9..e13cfdcc1cd1 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -8,9 +8,9 @@ #include "transaction.h" #include "print-tree.h" -int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot, - const char *name, - int name_len, struct btrfs_inode_ref **ref_ret) +struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, + int slot, const char *name, + int name_len) { struct btrfs_inode_ref *ref; unsigned long ptr; @@ -28,13 +28,10 @@ int btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot, cur_offset += len + sizeof(*ref); if (len != name_len) continue; - if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) { - if (ref_ret) - *ref_ret = ref; - return 1; - } + if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) + return ref; } - return 0; + return NULL; } int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, @@ -213,8 +210,10 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, } else if (ret < 0) { goto out; } - if (!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], - name, name_len, &ref)) { + + ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name, + name_len); + if (!ref) { ret = -ENOENT; search_ext_refs = 1; goto out; @@ -341,9 +340,9 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, ins_len); if (ret == -EEXIST) { u32 old_size; - - if (btrfs_find_name_in_backref(path->nodes[0], path->slots[0], - name, name_len, &ref)) + ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], + name, name_len); + if (ref) goto out; old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); @@ -359,7 +358,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, if (ret == -EOVERFLOW) { if (btrfs_find_name_in_backref(path->nodes[0], path->slots[0], - name, name_len, &ref)) + name, name_len)) ret = -EEXIST; else ret = -EMLINK; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 34d087008b72..7a7ae33bea51 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1271,8 +1271,8 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, parent_id, name, namelen, NULL); else - ret = btrfs_find_name_in_backref(log_eb, log_slot, name, - namelen, NULL); + ret = !!btrfs_find_name_in_backref(log_eb, log_slot, + name, namelen); if (!ret) { struct inode *dir; @@ -1338,8 +1338,8 @@ static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir, path->slots[0], parent_id, name, namelen, NULL); else - ret = 
btrfs_find_name_in_backref(path->nodes[0], path->slots[0], - name, namelen, NULL); + ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], + name, namelen); out: btrfs_free_path(path); From 6ff49c6ad285160b8ba48589ebdbf6cebdd42f74 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Tue, 27 Aug 2019 14:46:29 +0300 Subject: [PATCH 114/138] btrfs: Make btrfs_find_name_in_ext_backref return struct btrfs_inode_extref btrfs_find_name_in_ext_backref returns either 0/1 depending on whether it found a backref for the given name. If it returns true then the actual inode_ref struct is returned in one of its parameters. That's pointless, instead refactor the function such that it returns either a pointer to the btrfs_inode_extref or NULL it it didn't find anything. This streamlines the function calling convention. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 8 +++----- fs/btrfs/inode-item.c | 33 +++++++++++++-------------------- fs/btrfs/tree-log.c | 13 ++++++------- 3 files changed, 22 insertions(+), 32 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 31ed47d195c2..292e21b23217 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2803,11 +2803,9 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, int slot, const char *name, int name_len); -int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, - u64 ref_objectid, const char *name, - int name_len, - struct btrfs_inode_extref **extref_ret); - +struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( + struct extent_buffer *leaf, int slot, u64 ref_objectid, + const char *name, int name_len); /* file-item.c */ struct btrfs_dio_private; int btrfs_del_csums(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index e13cfdcc1cd1..668701832845 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -34,10 +34,9 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, return NULL; } -int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, - u64 ref_objectid, - const char *name, int name_len, - struct btrfs_inode_extref **extref_ret) +struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( + struct extent_buffer *leaf, int slot, u64 ref_objectid, + const char *name, int name_len) { struct btrfs_inode_extref *extref; unsigned long ptr; @@ -62,15 +61,12 @@ int btrfs_find_name_in_ext_backref(struct extent_buffer *leaf, int slot, if (ref_name_len == name_len && btrfs_inode_extref_parent(leaf, extref) == ref_objectid && - (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) { - if (extref_ret) - *extref_ret = extref; - return 1; - } + (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) + return extref; cur_offset += ref_name_len + sizeof(*extref); } - return 0; + return NULL; } /* Returns NULL if no extref found */ @@ -84,7 +80,6 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, { int ret; struct btrfs_key key; - struct btrfs_inode_extref *extref; key.objectid = inode_objectid; key.type = BTRFS_INODE_EXTREF_KEY; @@ -95,11 +90,9 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, return ERR_PTR(ret); if (ret > 0) return NULL; - if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], - ref_objectid, name, name_len, - &extref)) - return NULL; - return extref; + return btrfs_find_name_in_ext_backref(path->nodes[0], 
path->slots[0], + ref_objectid, name, name_len); + } static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, @@ -139,9 +132,9 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, * This should always succeed so error here will make the FS * readonly. */ - if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], - ref_objectid, - name, name_len, &extref)) { + extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], + ref_objectid, name, name_len); + if (!extref) { btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL); ret = -EROFS; goto out; @@ -284,7 +277,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans, if (btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], ref_objectid, - name, name_len, NULL)) + name, name_len)) goto out; btrfs_extend_item(path, ins_len); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7a7ae33bea51..7a0e3f8dec5c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -968,7 +968,7 @@ static noinline int backref_in_log(struct btrfs_root *log, if (btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], ref_objectid, - name, namelen, NULL)) + name, namelen)) match = 1; goto out; @@ -1267,9 +1267,9 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, goto out; if (key->type == BTRFS_INODE_EXTREF_KEY) - ret = btrfs_find_name_in_ext_backref(log_eb, log_slot, - parent_id, name, - namelen, NULL); + ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot, + parent_id, name, + namelen); else ret = !!btrfs_find_name_in_backref(log_eb, log_slot, name, namelen); @@ -1334,9 +1334,8 @@ static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir, goto out; } if (key.type == BTRFS_INODE_EXTREF_KEY) - ret = btrfs_find_name_in_ext_backref(path->nodes[0], - path->slots[0], parent_id, - name, namelen, NULL); + ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], + path->slots[0], parent_id, name, namelen); else ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name, namelen); From cb49511328dcce73840a54661622950d7fa6384e Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 9 Aug 2019 17:12:38 +0200 Subject: [PATCH 115/138] btrfs: define separate btrfs_set/get_XX helpers There are helpers for all type widths defined via macro and optionally can use a token which is a cached pointer to avoid repeated mapping of the extent buffer. The token value is known at compile time, when it's valid it's always address of a local variable, otherwise it's NULL passed by the token-less helpers. This can be utilized to remove some branching as the helpers are used frequenlty. 
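A rough usage sketch of the two variants (variable names are illustrative, and at
this point in the series btrfs_init_map_token() still takes only the token):

    struct btrfs_map_token token;
    u64 a, b;

    /* token-less variant: maps the extent buffer page on every call */
    a = btrfs_get_64(eb, ptr, off);

    /* token variant: the token caches the last mapped page, so repeated
     * accesses within the same page avoid the remapping work */
    btrfs_init_map_token(&token);
    b = btrfs_get_token_64(eb, ptr, off, &token);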
Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 15 ++++-------- fs/btrfs/struct-funcs.c | 51 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 292e21b23217..8e18fb062215 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1335,17 +1335,10 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \ unsigned long off, u##bits val, \ struct btrfs_map_token *token); \ -static inline u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ - const void *ptr, \ - unsigned long off) \ -{ \ - return btrfs_get_token_##bits(eb, ptr, off, NULL); \ -} \ -static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr,\ - unsigned long off, u##bits val) \ -{ \ - btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ -} +u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ + const void *ptr, unsigned long off); \ +void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ + unsigned long off, u##bits val); DECLARE_BTRFS_SETGET_BITS(8) DECLARE_BTRFS_SETGET_BITS(16) diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index 4c13b737f568..e63936e4c1e0 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -33,6 +33,8 @@ static inline void put_unaligned_le8(u8 val, void *p) * * The extent buffer api is used to do the page spanning work required to * have a metadata blocksize different from the page size. + * + * There are 2 variants defined, one with a token pointer and one without. */ #define DEFINE_BTRFS_SETGET_BITS(bits) \ @@ -75,6 +77,31 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ } \ return res; \ } \ +u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ + const void *ptr, unsigned long off) \ +{ \ + unsigned long part_offset = (unsigned long)ptr; \ + unsigned long offset = part_offset + off; \ + void *p; \ + int err; \ + char *kaddr; \ + unsigned long map_start; \ + unsigned long map_len; \ + int size = sizeof(u##bits); \ + u##bits res; \ + \ + err = map_private_extent_buffer(eb, offset, size, \ + &kaddr, &map_start, &map_len); \ + if (err) { \ + __le##bits leres; \ + \ + read_extent_buffer(eb, &leres, offset, size); \ + return le##bits##_to_cpu(leres); \ + } \ + p = kaddr + part_offset - map_start; \ + res = get_unaligned_le##bits(p + off); \ + return res; \ +} \ void btrfs_set_token_##bits(struct extent_buffer *eb, \ const void *ptr, unsigned long off, \ u##bits val, \ @@ -113,6 +140,30 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \ token->offset = map_start; \ token->eb = eb; \ } \ +} \ +void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ + unsigned long off, u##bits val) \ +{ \ + unsigned long part_offset = (unsigned long)ptr; \ + unsigned long offset = part_offset + off; \ + void *p; \ + int err; \ + char *kaddr; \ + unsigned long map_start; \ + unsigned long map_len; \ + int size = sizeof(u##bits); \ + \ + err = map_private_extent_buffer(eb, offset, size, \ + &kaddr, &map_start, &map_len); \ + if (err) { \ + __le##bits val2; \ + \ + val2 = cpu_to_le##bits(val); \ + write_extent_buffer(eb, &val2, offset, size); \ + return; \ + } \ + p = kaddr + part_offset - map_start; \ + put_unaligned_le##bits(val, p + off); \ } DEFINE_BTRFS_SETGET_BITS(8) From 48bc39501a549ec978c8aad28eb89ca3a2a7ca03 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 9 Aug 2019 17:30:23 +0200 Subject: [PATCH 116/138] btrfs: assume valid token for 
btrfs_set/get_token helpers Now that we can safely assume that the token is always a valid pointer, remove the branches that check that. Signed-off-by: David Sterba --- fs/btrfs/struct-funcs.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index e63936e4c1e0..3a29b911d2e2 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -52,7 +52,9 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ int size = sizeof(u##bits); \ u##bits res; \ \ - if (token && token->kaddr && token->offset <= offset && \ + ASSERT(token); \ + \ + if (token->kaddr && token->offset <= offset && \ token->eb == eb && \ (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ @@ -70,11 +72,9 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ } \ p = kaddr + part_offset - map_start; \ res = get_unaligned_le##bits(p + off); \ - if (token) { \ - token->kaddr = kaddr; \ - token->offset = map_start; \ - token->eb = eb; \ - } \ + token->kaddr = kaddr; \ + token->offset = map_start; \ + token->eb = eb; \ return res; \ } \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ @@ -116,7 +116,9 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \ unsigned long map_len; \ int size = sizeof(u##bits); \ \ - if (token && token->kaddr && token->offset <= offset && \ + ASSERT(token); \ + \ + if (token->kaddr && token->offset <= offset && \ token->eb == eb && \ (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ @@ -135,11 +137,9 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \ } \ p = kaddr + part_offset - map_start; \ put_unaligned_le##bits(val, p + off); \ - if (token) { \ - token->kaddr = kaddr; \ - token->offset = map_start; \ - token->eb = eb; \ - } \ + token->kaddr = kaddr; \ + token->offset = map_start; \ + token->eb = eb; \ } \ void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ unsigned long off, u##bits val) \ From c82f823c9b006c31059341af41da9f8b2e3e64d9 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 9 Aug 2019 17:48:21 +0200 Subject: [PATCH 117/138] btrfs: tie extent buffer and it's token together Further simplifaction of the get/set helpers is possible when the token is uniquely tied to an extent buffer. A condition and an assignment can be avoided. The initializations are moved closer to the first use when the extent buffer is valid. There's one exception in __push_leaf_left where the token is reused. 
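A minimal sketch of the resulting pattern (mirroring the fill_inode_item() and
overwrite_item() hunks below; item and size are placeholders):

    struct btrfs_map_token token;

    /* the token is bound to one extent buffer at init time */
    btrfs_init_map_token(&token, leaf);
    btrfs_set_token_inode_size(leaf, item, size, &token);

Reusing the token with a different extent buffer now requires another
btrfs_init_map_token() call, as the __push_leaf_left() hunk below shows.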
Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 27 +++++++++++---------------- fs/btrfs/ctree.h | 4 +++- fs/btrfs/inode.c | 2 +- fs/btrfs/struct-funcs.c | 6 ++---- fs/btrfs/tree-log.c | 7 +++---- 5 files changed, 20 insertions(+), 26 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index fbf94e28fba8..88c3b338508d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3574,7 +3574,7 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr) if (!nr) return 0; - btrfs_init_map_token(&token); + btrfs_init_map_token(&token, l); start_item = btrfs_item_nr(start); end_item = btrfs_item_nr(end); data_len = btrfs_token_item_offset(l, start_item, &token) + @@ -3632,8 +3632,6 @@ static noinline int __push_leaf_right(struct btrfs_path *path, u32 data_end; u32 this_item_size; - btrfs_init_map_token(&token); - if (empty) nr = 0; else @@ -3706,6 +3704,7 @@ static noinline int __push_leaf_right(struct btrfs_path *path, push_items * sizeof(struct btrfs_item)); /* update the item pointers */ + btrfs_init_map_token(&token, right); right_nritems += push_items; btrfs_set_header_nritems(right, right_nritems); push_space = BTRFS_LEAF_DATA_SIZE(fs_info); @@ -3860,8 +3859,6 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, u32 old_left_item_size; struct btrfs_map_token token; - btrfs_init_map_token(&token); - if (empty) nr = min(right_nritems, max_slot); else @@ -3915,6 +3912,7 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, old_left_nritems = btrfs_header_nritems(left); BUG_ON(old_left_nritems <= 0); + btrfs_init_map_token(&token, left); old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { u32 ioff; @@ -3946,6 +3944,8 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, (btrfs_header_nritems(right) - push_items) * sizeof(struct btrfs_item)); } + + btrfs_init_map_token(&token, right); right_nritems -= push_items; btrfs_set_header_nritems(right, right_nritems); push_space = BTRFS_LEAF_DATA_SIZE(fs_info); @@ -4076,8 +4076,6 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans, struct btrfs_disk_key disk_key; struct btrfs_map_token token; - btrfs_init_map_token(&token); - nritems = nritems - mid; btrfs_set_header_nritems(right, nritems); data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l); @@ -4093,6 +4091,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans, rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid); + btrfs_init_map_token(&token, right); for (i = 0; i < nritems; i++) { struct btrfs_item *item = btrfs_item_nr(i); u32 ioff; @@ -4576,8 +4575,6 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) int i; struct btrfs_map_token token; - btrfs_init_map_token(&token); - leaf = path->nodes[0]; slot = path->slots[0]; @@ -4599,6 +4596,7 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) * item0..itemN ... dataN.offset..dataN.size .. 
data0.size */ /* first correct the data pointers */ + btrfs_init_map_token(&token, leaf); for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(i); @@ -4673,8 +4671,6 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size) int i; struct btrfs_map_token token; - btrfs_init_map_token(&token); - leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); @@ -4699,6 +4695,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size) * item0..itemN ... dataN.offset..dataN.size .. data0.size */ /* first correct the data pointers */ + btrfs_init_map_token(&token, leaf); for (i = slot; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(i); @@ -4750,8 +4747,6 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, } btrfs_unlock_up_safe(path, 1); - btrfs_init_map_token(&token); - leaf = path->nodes[0]; slot = path->slots[0]; @@ -4765,6 +4760,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, BUG(); } + btrfs_init_map_token(&token, leaf); if (slot != nritems) { unsigned int old_data = btrfs_item_end_nr(leaf, slot); @@ -4971,9 +4967,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, int wret; int i; u32 nritems; - struct btrfs_map_token token; - - btrfs_init_map_token(&token); leaf = path->nodes[0]; last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); @@ -4985,12 +4978,14 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (slot + nr != nritems) { int data_end = leaf_data_end(leaf); + struct btrfs_map_token token; memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET + data_end + dsize, BTRFS_LEAF_DATA_OFFSET + data_end, last_off - data_end); + btrfs_init_map_token(&token, leaf); for (i = slot + nr; i < nritems; i++) { u32 ioff; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8e18fb062215..033a0d5d1789 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1303,8 +1303,10 @@ struct btrfs_map_token { #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \ ((bytes) >> (fs_info)->sb->s_blocksize_bits) -static inline void btrfs_init_map_token (struct btrfs_map_token *token) +static inline void btrfs_init_map_token(struct btrfs_map_token *token, + struct extent_buffer *eb) { + token->eb = eb; token->kaddr = NULL; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 07f77c7e6b22..e0e940fe01df 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3890,7 +3890,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, { struct btrfs_map_token token; - btrfs_init_map_token(&token); + btrfs_init_map_token(&token, leaf); btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index 3a29b911d2e2..73f7987143df 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -53,9 +53,9 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ u##bits res; \ \ ASSERT(token); \ + ASSERT(token->eb == eb); \ \ if (token->kaddr && token->offset <= offset && \ - token->eb == eb && \ (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ p = kaddr + part_offset - token->offset; \ @@ -74,7 +74,6 @@ u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ res = get_unaligned_le##bits(p + off); \ token->kaddr = kaddr; \ token->offset = map_start; \ - token->eb = eb; \ return res; \ } \ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ @@ -117,9 +116,9 @@ void 
btrfs_set_token_##bits(struct extent_buffer *eb, \ int size = sizeof(u##bits); \ \ ASSERT(token); \ + ASSERT(token->eb == eb); \ \ if (token->kaddr && token->offset <= offset && \ - token->eb == eb && \ (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ p = kaddr + part_offset - token->offset; \ @@ -139,7 +138,6 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \ put_unaligned_le##bits(val, p + off); \ token->kaddr = kaddr; \ token->offset = map_start; \ - token->eb = eb; \ } \ void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ unsigned long off, u##bits val) \ diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7a0e3f8dec5c..77b6797fcac3 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -506,7 +506,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, ino_size != 0) { struct btrfs_map_token token; - btrfs_init_map_token(&token); + btrfs_init_map_token(&token, dst_eb); btrfs_set_token_inode_size(dst_eb, dst_item, ino_size, &token); } @@ -3842,7 +3842,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, { struct btrfs_map_token token; - btrfs_init_map_token(&token); + btrfs_init_map_token(&token, leaf); if (log_inode_only) { /* set the generation to zero so the recover code @@ -4302,8 +4302,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans, if (ret) return ret; - btrfs_init_map_token(&token); - ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, em->start + em->len, NULL, 0, 1, sizeof(*fi), &extent_inserted); @@ -4321,6 +4319,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, return ret; } leaf = path->nodes[0]; + btrfs_init_map_token(&token, leaf); fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); From c09767a8960ca0500fb636bf73686723337debf4 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 15 Aug 2019 14:04:02 -0700 Subject: [PATCH 118/138] btrfs: use correct count in btrfs_file_write_iter() generic_write_checks() may modify iov_iter_count(), so we must get the count after the call, not before. Using the wrong one has a couple of consequences: 1. We check a longer range in check_can_nocow() for nowait than we're actually writing. 2. We create extra hole extent maps in btrfs_cont_expand(). As far as I can tell, this is harmless, but I might be missing something. These issues are pretty minor, but let's fix it before something more important trips on it. 
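Roughly, the intended ordering is (simplified sketch; locking and the other error
paths are omitted):

    ssize_t err;
    loff_t pos;
    size_t count;

    err = generic_write_checks(iocb, from);
    if (err <= 0)
            return err;

    pos = iocb->ki_pos;
    /* read the count only now: generic_write_checks() may have shrunk
     * the iter, e.g. for file size limits */
    count = iov_iter_count(from);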
Fixes: edf064e7c6fe ("btrfs: nowait aio support") Reviewed-by: Josef Bacik Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/file.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 1cb694c96500..25df5b03b591 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1885,7 +1885,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host); ssize_t err; loff_t pos; - size_t count = iov_iter_count(from); + size_t count; loff_t oldsize; int clean_page = 0; @@ -1906,6 +1906,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, } pos = iocb->ki_pos; + count = iov_iter_count(from); if (iocb->ki_flags & IOCB_NOWAIT) { /* * We will allocate space in case nodatacow is not set, From f50cb7aff9645998b3269eac903a5e01cd6ba689 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 15 Aug 2019 14:04:03 -0700 Subject: [PATCH 119/138] btrfs: treat RWF_{,D}SYNC writes as sync for CRCs The VFS indicates a synchronous write to ->write_iter() via iocb->ki_flags. The IOCB_{,D}SYNC flags may be set based on the file (see iocb_flags()) or the RWF_* flags passed to a syscall like pwritev2() (see kiocb_set_rw_flags()). However, in btrfs_file_write_iter(), we're checking if a write is synchronous based only on the file; we use this to decide when to bump the sync_writers counter and thus do CRCs synchronously. Make sure we do this for all synchronous writes as determined by the VFS. Reviewed-by: Josef Bacik Signed-off-by: Omar Sandoval Reviewed-by: David Sterba [ add const ] Signed-off-by: David Sterba --- fs/btrfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 25df5b03b591..702c30a28a43 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1882,7 +1882,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, u64 start_pos; u64 end_pos; ssize_t num_written = 0; - bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host); + const bool sync = iocb->ki_flags & IOCB_DSYNC; ssize_t err; loff_t pos; size_t count; From e182163d9cbe86bc0f754068628df55e6dc073d3 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 15 Aug 2019 14:04:04 -0700 Subject: [PATCH 120/138] btrfs: stop clearing EXTENT_DIRTY in inode I/O tree Since commit fee187d9d9dd ("Btrfs: do not set EXTENT_DIRTY along with EXTENT_DELALLOC"), we never set EXTENT_DIRTY in inode->io_tree, so we can simplify and stop trying to clear it. 
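For example, the delalloc clearing call in btrfs_truncate_block() (shown in the
hunk below) now passes only the bits that can actually be set:

    clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
                     EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                     0, 0, &cached_state);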
Reviewed-by: Josef Bacik Signed-off-by: Omar Sandoval Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 6 ++---- fs/btrfs/file.c | 4 ++-- fs/btrfs/free-space-cache.c | 9 ++++---- fs/btrfs/inode.c | 41 ++++++++++++++---------------------- fs/btrfs/ioctl.c | 5 ++--- fs/btrfs/tests/inode-tests.c | 12 ++++------- 6 files changed, 30 insertions(+), 47 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index bac59d721b54..4dc5e6939856 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4322,10 +4322,8 @@ int extent_invalidatepage(struct extent_io_tree *tree, lock_extent_bits(tree, start, end, &cached_state); wait_on_page_writeback(page); - clear_extent_bit(tree, start, end, - EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, - 1, 1, &cached_state); + clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 1, 1, &cached_state); return 0; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 702c30a28a43..8fe4eb7e5045 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -537,8 +537,8 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, * we can set things up properly */ clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached); + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, + 0, 0, cached); if (!btrfs_is_free_space_inode(BTRFS_I(inode))) { if (start_pos >= isize && diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ab806d82fe12..d54dcd0ab230 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1006,7 +1006,7 @@ update_cache_item(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) { clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, - EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL); + EXTENT_DELALLOC, 0, 0, NULL); goto fail; } leaf = path->nodes[0]; @@ -1018,9 +1018,8 @@ update_cache_item(struct btrfs_trans_handle *trans, if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || found_key.offset != offset) { clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, - inode->i_size - 1, - EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, - NULL); + inode->i_size - 1, EXTENT_DELALLOC, 0, + 0, NULL); btrfs_release_path(path); goto fail; } @@ -1116,7 +1115,7 @@ static int flush_dirty_cache(struct inode *inode) ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); if (ret) clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, - EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL); + EXTENT_DELALLOC, 0, 0, NULL); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e0e940fe01df..a0546401bc0a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4986,9 +4986,8 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, } clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, - 0, 0, &cached_state); + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, + 0, 0, &cached_state); ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0, &cached_state); @@ -5372,9 +5371,9 @@ static void evict_inode_truncate_pages(struct inode *inode) btrfs_qgroup_free_data(inode, NULL, start, end - start + 1); clear_extent_bit(io_tree, start, end, - EXTENT_LOCKED | EXTENT_DIRTY | - EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | - EXTENT_DEFRAG, 1, 1, &cached_state); + 
EXTENT_LOCKED | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, + &cached_state); cond_resched(); spin_lock(&io_tree->lock); @@ -7732,12 +7731,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; u64 len = bh_result->b_size; - int unlock_bits = EXTENT_LOCKED; int ret = 0; - if (create) - unlock_bits |= EXTENT_DIRTY; - else + if (!create) len = min_t(u64, len, fs_info->sectorsize); lockstart = start; @@ -7796,9 +7792,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, if (ret < 0) goto unlock_err; - /* clear and unlock the entire range */ - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - unlock_bits, 1, 0, &cached_state); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, + lockend, &cached_state); } else { ret = btrfs_get_blocks_direct_read(em, bh_result, inode, start, len); @@ -7814,9 +7809,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, */ lockstart = start + bh_result->b_size; if (lockstart < lockend) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockend, unlock_bits, 1, 0, - &cached_state); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + lockstart, lockend, &cached_state); } else { free_extent_state(cached_state); } @@ -7827,8 +7821,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, return 0; unlock_err: - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - unlock_bits, 1, 0, &cached_state); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state); err: if (dio_data) current->journal_info = dio_data; @@ -8843,8 +8837,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, */ if (!inode_evicting) clear_extent_bit(tree, start, end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DELALLOC_NEW | + EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 0, &cached_state); /* @@ -8899,8 +8892,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, if (PageDirty(page)) btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); if (!inode_evicting) { - clear_extent_bit(tree, page_start, page_end, - EXTENT_LOCKED | EXTENT_DIRTY | + clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, &cached_state); @@ -9028,9 +9020,8 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) * reserve data&meta space before lock_page() (see above comments). 
*/ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, - 0, 0, &cached_state); + EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG, 0, 0, &cached_state); ret2 = btrfs_set_extent_delalloc(inode, page_start, end, 0, &cached_state); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5942615be398..de730e56d3f5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1333,9 +1333,8 @@ static int cluster_pages_for_defrag(struct inode *inode, lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, - page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, - &cached_state); + page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | + EXTENT_DEFRAG, 0, 0, &cached_state); if (i_done != page_cnt) { spin_lock(&BTRFS_I(inode)->lock); diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index b363fb990cec..09ecf7dc7b08 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -988,8 +988,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, BTRFS_MAX_EXTENT_SIZE >> 1, (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, - EXTENT_DELALLOC | EXTENT_DIRTY | - EXTENT_UPTODATE, 0, 0, NULL); + EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1056,8 +1055,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, BTRFS_MAX_EXTENT_SIZE + sectorsize, BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_UPTODATE, 0, 0, NULL); + EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1089,8 +1087,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) /* Empty */ ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_UPTODATE, 0, 0, NULL); + EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL); if (ret) { test_err("clear_extent_bit returned %d", ret); goto out; @@ -1105,8 +1102,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize) out: if (ret) clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, - EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_UPTODATE, 0, 0, NULL); + EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL); iput(inode); btrfs_free_dummy_root(root); btrfs_free_dummy_fs_info(fs_info); From ef1317a1b9a347cdc3967a2048046e8fb4db94ba Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:10:54 -0400 Subject: [PATCH 121/138] btrfs: do not allow reservations if we have pending tickets If we already have tickets on the list we don't want to steal their reservations. This is a preparation patch for upcoming changes, technically this shouldn't happen today because of the way we add bytes to tickets before adding them to the space_info in most cases. This does not change the FIFO nature of reserve tickets, it simply allows us to enforce it in a different way. Previously it was enforced because any new space would be added to the first ticket on the list, which would result in new reservations getting a reserve ticket. This replaces that mechanism by simply checking to see if we have outstanding reserve tickets and skipping straight to adding a ticket for our reservation. 
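For reference, the reservation fast path in __reserve_metadata_bytes() after this patch reads roughly as follows (a sketch reconstructed from the diff below, with the tracepoint and surrounding code trimmed; it is not an additional change):

	pending_tickets = !list_empty(&space_info->tickets) ||
			  !list_empty(&space_info->priority_tickets);

	/*
	 * Reserve directly only if nobody is already queued; otherwise fall
	 * through to ticket creation so earlier requests are served first.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     can_overcommit(fs_info, space_info, orig_bytes, flush,
			    system_chunk))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}
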
Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index bea7ae0a9739..918bffb6ba30 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -993,6 +993,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, struct reserve_ticket ticket; u64 used; int ret = 0; + bool pending_tickets; ASSERT(orig_bytes); ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); @@ -1000,14 +1001,17 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, spin_lock(&space_info->lock); ret = -ENOSPC; used = btrfs_space_info_used(space_info, true); + pending_tickets = !list_empty(&space_info->tickets) || + !list_empty(&space_info->priority_tickets); /* * Carry on if we have enough space (short-circuit) OR call * can_overcommit() to ensure we can overcommit to continue. */ - if ((used + orig_bytes <= space_info->total_bytes) || - can_overcommit(fs_info, space_info, orig_bytes, flush, - system_chunk)) { + if (!pending_tickets && + ((used + orig_bytes <= space_info->total_bytes) || + can_overcommit(fs_info, space_info, orig_bytes, flush, + system_chunk))) { btrfs_space_info_update_bytes_may_use(fs_info, space_info, orig_bytes); trace_btrfs_space_reservation(fs_info, "space_info", From f3e75e3805e1da4f7812f731b5396430fa3f2a08 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:10:55 -0400 Subject: [PATCH 122/138] btrfs: roll tracepoint into btrfs_space_info_update helper We duplicate this tracepoint everywhere we call these helpers, so update the helper to have the tracepoint as well. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 3 --- fs/btrfs/block-rsv.c | 5 ----- fs/btrfs/delalloc-space.c | 4 ---- fs/btrfs/extent-tree.c | 9 --------- fs/btrfs/space-info.c | 10 ---------- fs/btrfs/space-info.h | 10 +++++++--- 6 files changed, 7 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 9a09f459337b..d61aa94884f0 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2696,9 +2696,6 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - trace_btrfs_space_reservation(info, "pinned", - cache->space_info->flags, - num_bytes, 1); percpu_counter_add_batch( &cache->space_info->total_bytes_pinned, num_bytes, diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index ef8b8ae27386..c808f93b743a 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -283,16 +283,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->reserved += num_bytes; btrfs_space_info_update_bytes_may_use(fs_info, sinfo, num_bytes); - trace_btrfs_space_reservation(fs_info, "space_info", - sinfo->flags, num_bytes, - 1); } } else if (block_rsv->reserved > block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; btrfs_space_info_update_bytes_may_use(fs_info, sinfo, -num_bytes); - trace_btrfs_space_reservation(fs_info, "space_info", - sinfo->flags, num_bytes, 0); block_rsv->reserved = block_rsv->size; } diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c index 391dcb217098..d949d7d2abed 100644 --- a/fs/btrfs/delalloc-space.c +++ b/fs/btrfs/delalloc-space.c @@ -130,8 +130,6 @@ int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes) 
return -ENOSPC; } btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes); - trace_btrfs_space_reservation(fs_info, "space_info", - data_sinfo->flags, bytes, 1); spin_unlock(&data_sinfo->lock); return 0; @@ -183,8 +181,6 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, data_sinfo = fs_info->data_sinfo; spin_lock(&data_sinfo->lock); btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, -len); - trace_btrfs_space_reservation(fs_info, "space_info", - data_sinfo->flags, len, 0); spin_unlock(&data_sinfo->lock); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2bf5dad82bf1..7d7e91f329ce 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2583,8 +2583,6 @@ static int pin_down_extent(struct btrfs_block_group_cache *cache, spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - trace_btrfs_space_reservation(fs_info, "pinned", - cache->space_info->flags, num_bytes, 1); percpu_counter_add_batch(&cache->space_info->total_bytes_pinned, num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH); set_extent_dirty(fs_info->pinned_extents, bytenr, @@ -2842,9 +2840,6 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, spin_lock(&cache->lock); cache->pinned -= len; btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len); - - trace_btrfs_space_reservation(fs_info, "pinned", - space_info->flags, len, 0); space_info->max_extent_size = 0; percpu_counter_add_batch(&space_info->total_bytes_pinned, -len, BTRFS_TOTAL_BYTES_PINNED_BATCH); @@ -2866,10 +2861,6 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, space_info, to_add); if (global_rsv->reserved >= global_rsv->size) global_rsv->full = 1; - trace_btrfs_space_reservation(fs_info, - "space_info", - space_info->flags, - to_add, 1); len -= to_add; } spin_unlock(&global_rsv->lock); diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 918bffb6ba30..2aa0b2040ac3 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -279,8 +279,6 @@ void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info, goto again; } btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes); - trace_btrfs_space_reservation(fs_info, "space_info", - space_info->flags, num_bytes, 0); spin_unlock(&space_info->lock); } @@ -301,9 +299,6 @@ void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info, ticket = list_first_entry(head, struct reserve_ticket, list); if (num_bytes >= ticket->bytes) { - trace_btrfs_space_reservation(fs_info, "space_info", - space_info->flags, - ticket->bytes, 1); list_del_init(&ticket->list); num_bytes -= ticket->bytes; btrfs_space_info_update_bytes_may_use(fs_info, @@ -313,9 +308,6 @@ void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info, space_info->tickets_id++; wake_up(&ticket->wait); } else { - trace_btrfs_space_reservation(fs_info, "space_info", - space_info->flags, - num_bytes, 1); btrfs_space_info_update_bytes_may_use(fs_info, space_info, num_bytes); @@ -1014,8 +1006,6 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, system_chunk))) { btrfs_space_info_update_bytes_may_use(fs_info, space_info, orig_bytes); - trace_btrfs_space_reservation(fs_info, "space_info", - space_info->flags, orig_bytes, 1); ret = 0; } diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index c2b54b8e1a14..66d73c290983 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -87,14 +87,18 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) * * Declare a helper 
function to detect underflow of various space info members */ -#define DECLARE_SPACE_INFO_UPDATE(name) \ +#define DECLARE_SPACE_INFO_UPDATE(name, trace_name) \ static inline void \ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \ struct btrfs_space_info *sinfo, \ s64 bytes) \ { \ + const u64 abs_bytes = (bytes < 0) ? -bytes : bytes; \ lockdep_assert_held(&sinfo->lock); \ trace_update_##name(fs_info, sinfo, sinfo->name, bytes); \ + trace_btrfs_space_reservation(fs_info, trace_name, \ + sinfo->flags, abs_bytes, \ + bytes > 0); \ if (bytes < 0 && sinfo->name < -bytes) { \ WARN_ON(1); \ sinfo->name = 0; \ @@ -103,8 +107,8 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \ sinfo->name += bytes; \ } -DECLARE_SPACE_INFO_UPDATE(bytes_may_use); -DECLARE_SPACE_INFO_UPDATE(bytes_pinned); +DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info"); +DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned"); void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, From a43c383574d80503f3ba9cd08de16dad2590ca1b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:10:56 -0400 Subject: [PATCH 123/138] btrfs: add space reservation tracepoint for reserved bytes I noticed when folding the trace_btrfs_space_reservation() tracepoint into the btrfs_space_info_update_* helpers that we didn't emit a tracepoint when doing btrfs_add_reserved_bytes(). I know this is because we were swapping bytes_may_use for bytes_reserved, so in my mind there was no reason to have the tracepoint there. But now there is because we always emit the unreserve for the bytes_may_use side, and this would have broken if compression was on anyway. Add a tracepoint to cover the bytes_reserved counter so the math still comes out right. Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index d61aa94884f0..bf7e3f23bba7 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2758,6 +2758,8 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache, } else { cache->reserved += num_bytes; space_info->bytes_reserved += num_bytes; + trace_btrfs_space_reservation(cache->fs_info, "space_info", + space_info->flags, num_bytes, 1); btrfs_space_info_update_bytes_may_use(cache->fs_info, space_info, -ram_bytes); if (delalloc) From 91182645075f9a41953bea703a7d10e9f661cd13 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 28 Aug 2019 11:15:24 -0400 Subject: [PATCH 124/138] btrfs: stop partially refilling tickets when releasing space btrfs_space_info_add_old_bytes is used when adding the extra space from an existing reservation back into the space_info to be used by any waiting tickets. In order to keep us from overcommitting we check to make sure that we can still use this space for our reserve ticket, and if we cannot we'll simply subtract it from space_info->bytes_may_use. However this is problematic, because it assumes that only changes to bytes_may_use would affect our ability to make reservations. Any changes to bytes_reserved would be missed. If we were unable to make a reservation prior because of reserved space, but that reserved space was free'd due to unlink or truncate and we were allowed to immediately reclaim that metadata space we would still ENOSPC. Consider the example where we create a file with a bunch of extents, using up 2MiB of actual space for the new tree blocks. 
Then we try to make a reservation of 2MiB but we do not have enough space to make this reservation. The iput() occurs in another thread and we remove this space, and since we did not write the blocks we simply do space_info->bytes_reserved -= 2MiB. We would never see this because we do not check our space info used, we just try to re-use the freed reservations. To fix this problem, and to greatly simplify the wakeup code, do away with this partial refilling nonsense. Use btrfs_space_info_add_old_bytes to subtract the reservation from space_info->bytes_may_use, and then check the ticket against the total used of the space_info the same way we do with the initial reservation attempt. This keeps the reservation logic consistent and solves the problem of early ENOSPC in the case that we free up space in places other than bytes_may_use and bytes_pinned. Thanks, Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 43 ++++++++++++++++--------------------------- 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 2aa0b2040ac3..71fdc50b5900 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -233,52 +233,41 @@ void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 num_bytes) { - struct reserve_ticket *ticket; struct list_head *head; - u64 used; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; - bool check_overcommit = false; spin_lock(&space_info->lock); head = &space_info->priority_tickets; + btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes); - /* - * If we are over our limit then we need to check and see if we can - * overcommit, and if we can't then we just need to free up our space - * and not satisfy any requests. - */ - used = btrfs_space_info_used(space_info, true); - if (used - num_bytes >= space_info->total_bytes) - check_overcommit = true; again: - while (!list_empty(head) && num_bytes) { - ticket = list_first_entry(head, struct reserve_ticket, - list); - /* - * We use 0 bytes because this space is already reserved, so - * adding the ticket space would be a double count. - */ - if (check_overcommit && - !can_overcommit(fs_info, space_info, 0, flush, false)) - break; - if (num_bytes >= ticket->bytes) { + while (!list_empty(head)) { + struct reserve_ticket *ticket; + u64 used = btrfs_space_info_used(space_info, true); + + ticket = list_first_entry(head, struct reserve_ticket, list); + + /* Check and see if our ticket can be satisified now. 
*/ + if ((used + ticket->bytes <= space_info->total_bytes) || + can_overcommit(fs_info, space_info, ticket->bytes, flush, + false)) { + btrfs_space_info_update_bytes_may_use(fs_info, + space_info, + ticket->bytes); list_del_init(&ticket->list); - num_bytes -= ticket->bytes; ticket->bytes = 0; space_info->tickets_id++; wake_up(&ticket->wait); } else { - ticket->bytes -= num_bytes; - num_bytes = 0; + break; } } - if (num_bytes && head == &space_info->priority_tickets) { + if (head == &space_info->priority_tickets) { head = &space_info->tickets; flush = BTRFS_RESERVE_FLUSH_ALL; goto again; } - btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes); spin_unlock(&space_info->lock); } From 18fa2284aac3f1071f51a897dc5585178cae458f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:10:58 -0400 Subject: [PATCH 125/138] btrfs: refactor the ticket wakeup code Now that btrfs_space_info_add_old_bytes simply checks if we can make the reservation and updates bytes_may_use, there's no reason to have both helpers in place. Factor out the ticket wakeup logic into it's own helper, make btrfs_space_info_add_old_bytes() update bytes_may_use and then call the wakeup helper, and replace all calls to btrfs_space_info_add_new_bytes() with the wakeup helper. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 4 +-- fs/btrfs/space-info.c | 55 ++++-------------------------------------- fs/btrfs/space-info.h | 19 ++++++++++----- 3 files changed, 20 insertions(+), 58 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7d7e91f329ce..49cb26fa7c63 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2866,8 +2866,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info, spin_unlock(&global_rsv->lock); /* Add to any tickets we may have */ if (len) - btrfs_space_info_add_new_bytes(fs_info, - space_info, len); + btrfs_try_granting_tickets(fs_info, + space_info); } spin_unlock(&space_info->lock); } diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 71fdc50b5900..fe3d7e30bfca 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -131,9 +131,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_readonly += bytes_readonly; if (total_bytes > 0) found->full = 0; - btrfs_space_info_add_new_bytes(info, found, - total_bytes - bytes_used - - bytes_readonly); + btrfs_try_granting_tickets(info, found); spin_unlock(&found->lock); *space_info = found; } @@ -229,17 +227,15 @@ static int can_overcommit(struct btrfs_fs_info *fs_info, * This is for space we already have accounted in space_info->bytes_may_use, so * basically when we're returning space from block_rsv's. 
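A condensed view of the resulting helper may be useful (a sketch reconstructed from this patch's diff below together with the previous patch's rework of the loop; tracepoints and comments are trimmed). The caller must already hold space_info->lock, and a ticket is now either granted in full or left on the list:

	void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info)
	{
		struct list_head *head;
		enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

		lockdep_assert_held(&space_info->lock);

		head = &space_info->priority_tickets;
	again:
		while (!list_empty(head)) {
			struct reserve_ticket *ticket;
			u64 used = btrfs_space_info_used(space_info, true);

			ticket = list_first_entry(head, struct reserve_ticket,
						  list);

			/* Either satisfy the ticket completely or stop. */
			if ((used + ticket->bytes <= space_info->total_bytes) ||
			    can_overcommit(fs_info, space_info, ticket->bytes,
					   flush, false)) {
				btrfs_space_info_update_bytes_may_use(fs_info,
						space_info, ticket->bytes);
				list_del_init(&ticket->list);
				ticket->bytes = 0;
				space_info->tickets_id++;
				wake_up(&ticket->wait);
			} else {
				break;
			}
		}

		/* Priority tickets first, then the normal ticket list. */
		if (head == &space_info->priority_tickets) {
			head = &space_info->tickets;
			flush = BTRFS_RESERVE_FLUSH_ALL;
			goto again;
		}
	}
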
*/ -void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - u64 num_bytes) +void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info) { struct list_head *head; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; - spin_lock(&space_info->lock); - head = &space_info->priority_tickets; - btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes); + lockdep_assert_held(&space_info->lock); + head = &space_info->priority_tickets; again: while (!list_empty(head)) { struct reserve_ticket *ticket; @@ -268,47 +264,6 @@ void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info, flush = BTRFS_RESERVE_FLUSH_ALL; goto again; } - spin_unlock(&space_info->lock); -} - -/* - * This is for newly allocated space that isn't accounted in - * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent - * we use this helper. - */ -void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - u64 num_bytes) -{ - struct reserve_ticket *ticket; - struct list_head *head = &space_info->priority_tickets; - -again: - while (!list_empty(head) && num_bytes) { - ticket = list_first_entry(head, struct reserve_ticket, - list); - if (num_bytes >= ticket->bytes) { - list_del_init(&ticket->list); - num_bytes -= ticket->bytes; - btrfs_space_info_update_bytes_may_use(fs_info, - space_info, - ticket->bytes); - ticket->bytes = 0; - space_info->tickets_id++; - wake_up(&ticket->wait); - } else { - btrfs_space_info_update_bytes_may_use(fs_info, - space_info, - num_bytes); - ticket->bytes -= num_bytes; - num_bytes = 0; - } - } - - if (num_bytes && head == &space_info->priority_tickets) { - head = &space_info->tickets; - goto again; - } } #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index 66d73c290983..b844e50a6a49 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -110,12 +110,6 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \ DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info"); DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned"); -void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - u64 num_bytes); -void btrfs_space_info_add_old_bytes(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *space_info, - u64 num_bytes); int btrfs_init_space_info(struct btrfs_fs_info *fs_info); void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags, u64 total_bytes, u64 bytes_used, @@ -133,5 +127,18 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 orig_bytes, enum btrfs_reserve_flush_enum flush); +void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info); + +static inline void btrfs_space_info_add_old_bytes( + struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info, + u64 num_bytes) +{ + spin_lock(&space_info->lock); + btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes); + btrfs_try_granting_tickets(fs_info, space_info); + spin_unlock(&space_info->lock); +} #endif /* BTRFS_SPACE_INFO_H */ From 2341ccd1bf05b3d844c10ad224cd38b06fd53219 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 28 Aug 2019 11:12:47 -0400 Subject: [PATCH 126/138] btrfs: rework wake_all_tickets Now that we no longer partially fill tickets we need to rework wake_all_tickets to call btrfs_try_to_wakeup_tickets() 
in order to see if any subsequent tickets are able to be satisfied. If our tickets_id changes we know something happened and we can keep flushing. Also if we find a ticket that is smaller than the first ticket in our queue then we want to retry the flushing loop again in case may_commit_transaction() decides we could satisfy the ticket by committing the transaction. Rename this to maybe_fail_all_tickets() while we're at it, to better reflect what the function is actually doing. Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 56 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index fe3d7e30bfca..eb4a926ea7b9 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -679,19 +679,61 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info, !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); } -static bool wake_all_tickets(struct list_head *head) +/* + * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets + * @fs_info - fs_info for this fs + * @space_info - the space info we were flushing + * + * We call this when we've exhausted our flushing ability and haven't made + * progress in satisfying tickets. The reservation code handles tickets in + * order, so if there is a large ticket first and then smaller ones we could + * very well satisfy the smaller tickets. This will attempt to wake up any + * tickets in the list to catch this case. + * + * This function returns true if it was able to make progress by clearing out + * other tickets, or if it stumbles across a ticket that was smaller than the + * first ticket. + */ +static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *space_info) { struct reserve_ticket *ticket; + u64 tickets_id = space_info->tickets_id; + u64 first_ticket_bytes = 0; + + while (!list_empty(&space_info->tickets) && + tickets_id == space_info->tickets_id) { + ticket = list_first_entry(&space_info->tickets, + struct reserve_ticket, list); + + /* + * may_commit_transaction will avoid committing the transaction + * if it doesn't feel like the space reclaimed by the commit + * would result in the ticket succeeding. However if we have a + * smaller ticket in the queue it may be small enough to be + * satisified by committing the transaction, so if any + * subsequent ticket is smaller than the first ticket go ahead + * and send us back for another loop through the enospc flushing + * code. + */ + if (first_ticket_bytes == 0) + first_ticket_bytes = ticket->bytes; + else if (first_ticket_bytes > ticket->bytes) + return true; - while (!list_empty(head)) { - ticket = list_first_entry(head, struct reserve_ticket, list); list_del_init(&ticket->list); ticket->error = -ENOSPC; wake_up(&ticket->wait); - if (ticket->bytes != ticket->orig_bytes) - return true; + + /* + * We're just throwing tickets away, so more flushing may not + * trip over btrfs_try_granting_tickets, so we need to call it + * here to see if we can make progress with the next ticket in + * the list. 
+ */ + btrfs_try_granting_tickets(fs_info, space_info); } - return false; + return (tickets_id != space_info->tickets_id); } /* @@ -759,7 +801,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) if (flush_state > COMMIT_TRANS) { commit_cycles++; if (commit_cycles > 2) { - if (wake_all_tickets(&space_info->tickets)) { + if (maybe_fail_all_tickets(fs_info, space_info)) { flush_state = FLUSH_DELAYED_ITEMS_NR; commit_cycles--; } else { From 00c0135eb8b8172de48e1accbb09ebfa3aa8bf25 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:11:00 -0400 Subject: [PATCH 127/138] btrfs: fix may_commit_transaction to deal with no partial filling Now that we aren't partially filling tickets we may have some slack space left in the space_info. We need to account for this in may_commit_transaction, otherwise we may choose to not commit the transaction despite it actually having enough space to satisfy our ticket. Calculate the free space we have in the space_info, if any, and subtract this from the ticket we have and use that amount to determine if we will need to commit to reclaim enough space. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index eb4a926ea7b9..3ab7a4810bef 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -473,12 +473,19 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans; u64 bytes_needed; u64 reclaim_bytes = 0; + u64 cur_free_bytes = 0; trans = (struct btrfs_trans_handle *)current->journal_info; if (trans) return -EAGAIN; spin_lock(&space_info->lock); + cur_free_bytes = btrfs_space_info_used(space_info, true); + if (cur_free_bytes < space_info->total_bytes) + cur_free_bytes = space_info->total_bytes - cur_free_bytes; + else + cur_free_bytes = 0; + if (!list_empty(&space_info->priority_tickets)) ticket = list_first_entry(&space_info->priority_tickets, struct reserve_ticket, list); @@ -486,6 +493,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info, ticket = list_first_entry(&space_info->tickets, struct reserve_ticket, list); bytes_needed = (ticket) ? ticket->bytes : 0; + + if (bytes_needed > cur_free_bytes) + bytes_needed -= cur_free_bytes; + else + bytes_needed = 0; spin_unlock(&space_info->lock); if (!bytes_needed) From def936e53530321bc5454883fa5c7ffdaa635837 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:11:01 -0400 Subject: [PATCH 128/138] btrfs: remove orig_bytes from reserve_ticket Now that we do not do partial filling of tickets simply remove orig_bytes, it is no longer needed. 
Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 8 -------- fs/btrfs/space-info.h | 1 - 2 files changed, 9 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 3ab7a4810bef..af53c3344598 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -920,7 +920,6 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, struct reserve_ticket *ticket, enum btrfs_reserve_flush_enum flush) { - u64 reclaim_bytes = 0; int ret; switch (flush) { @@ -945,17 +944,11 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, spin_lock(&space_info->lock); ret = ticket->error; if (ticket->bytes || ticket->error) { - if (ticket->bytes < ticket->orig_bytes) - reclaim_bytes = ticket->orig_bytes - ticket->bytes; list_del_init(&ticket->list); if (!ret) ret = -ENOSPC; } spin_unlock(&space_info->lock); - - if (reclaim_bytes) - btrfs_space_info_add_old_bytes(fs_info, space_info, - reclaim_bytes); ASSERT(list_empty(&ticket->list)); return ret; } @@ -1015,7 +1008,6 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info, * the list and we will do our own flushing further down. */ if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { - ticket.orig_bytes = orig_bytes; ticket.bytes = orig_bytes; ticket.error = 0; init_waitqueue_head(&ticket.wait); diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index b844e50a6a49..dc6ee66ed600 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -70,7 +70,6 @@ struct btrfs_space_info { }; struct reserve_ticket { - u64 orig_bytes; u64 bytes; int error; struct list_head list; From d05e46497f86175f85a4f0b9346cae2260e97c8d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:11:02 -0400 Subject: [PATCH 129/138] btrfs: rename btrfs_space_info_add_old_bytes This name doesn't really fit with how the space reservation stuff works now, rename it to btrfs_space_info_free_bytes_may_use so it's clear what the function is doing. 
Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/block-rsv.c | 5 +++-- fs/btrfs/delayed-ref.c | 2 +- fs/btrfs/space-info.h | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index c808f93b743a..01b1ce2240bc 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -54,8 +54,9 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, spin_unlock(&dest->lock); } if (num_bytes) - btrfs_space_info_add_old_bytes(fs_info, space_info, - num_bytes); + btrfs_space_info_free_bytes_may_use(fs_info, + space_info, + num_bytes); } if (qgroup_to_release_ret) *qgroup_to_release_ret = qgroup_to_release; diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 951a60c740e7..df3bd880061d 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -158,7 +158,7 @@ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0, num_bytes, 1); if (to_free) - btrfs_space_info_add_old_bytes(fs_info, + btrfs_space_info_free_bytes_may_use(fs_info, delayed_refs_rsv->space_info, to_free); } diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h index dc6ee66ed600..8867e84aa33d 100644 --- a/fs/btrfs/space-info.h +++ b/fs/btrfs/space-info.h @@ -129,7 +129,7 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root, void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info); -static inline void btrfs_space_info_add_old_bytes( +static inline void btrfs_space_info_free_bytes_may_use( struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 num_bytes) From 3593ce30b5b4a8fbe84dd1c275e9be01af28511a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:19:00 -0400 Subject: [PATCH 130/138] btrfs: change the minimum global reserve size It made sense to have the global reserve set at 16M in the past, but since it is used less nowadays set the minimum size to the number of items we'll need to update the main trees we update during a transaction commit, plus some slop area so we can do unlinks if we need to. In practice this doesn't affect normal file systems, but for xfstests where we do things like fill up a fs and then rm * it can fall over in weird ways. This enables us for more sane behavior at extremely small file system sizes. Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/block-rsv.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index 01b1ce2240bc..c8b7995c33ee 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -259,6 +259,7 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; struct btrfs_space_info *sinfo = block_rsv->space_info; u64 num_bytes; + unsigned min_items; /* * The global block rsv is based on the size of the extent tree, the @@ -268,7 +269,26 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) + btrfs_root_used(&fs_info->csum_root->root_item) + btrfs_root_used(&fs_info->tree_root->root_item); - num_bytes = max_t(u64, num_bytes, SZ_16M); + + /* + * We at a minimum are going to modify the csum root, the tree root, and + * the extent root. 
+ */ + min_items = 3; + + /* + * But we also want to reserve enough space so we can do the fallback + * global reserve for an unlink, which is an additional 5 items (see the + * comment in __unlink_start_trans for what we're modifying.) + * + * But we also need space for the delayed ref updates from the unlink, + * so its 10, 5 for the actual operation, and 5 for the delayed ref + * updates. + */ + min_items += 10; + + num_bytes = max_t(u64, num_bytes, + btrfs_calc_insert_metadata_size(fs_info, min_items)); spin_lock(&sinfo->lock); spin_lock(&block_rsv->lock); From d792b0f197116b7093c119df29cb174ccfde94b9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:19:01 -0400 Subject: [PATCH 131/138] btrfs: always reserve our entire size for the global reserve While messing with the overcommit logic I noticed that sometimes we'd ENOSPC out when really we should have run out of space much earlier. It turns out it's because we'll only reserve up to the free amount left in the space info for the global reserve, but that doesn't make sense with overcommit because we could be well above our actual size. This results in the global reserve not carving out it's entire reservation, and thus not putting enough pressure on the rest of the infrastructure to do the right thing and ENOSPC out at a convenient time. Fix this by always taking our full reservation amount for the global reserve. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/block-rsv.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index c8b7995c33ee..bea66b499e6f 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -296,15 +296,10 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->size = min_t(u64, num_bytes, SZ_512M); if (block_rsv->reserved < block_rsv->size) { - num_bytes = btrfs_space_info_used(sinfo, true); - if (sinfo->total_bytes > num_bytes) { - num_bytes = sinfo->total_bytes - num_bytes; - num_bytes = min(num_bytes, - block_rsv->size - block_rsv->reserved); - block_rsv->reserved += num_bytes; - btrfs_space_info_update_bytes_may_use(fs_info, sinfo, - num_bytes); - } + num_bytes = block_rsv->size - block_rsv->reserved; + block_rsv->reserved += num_bytes; + btrfs_space_info_update_bytes_may_use(fs_info, sinfo, + num_bytes); } else if (block_rsv->reserved > block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; btrfs_space_info_update_bytes_may_use(fs_info, sinfo, From 426551f6866a369c045c77a16725f41a097fac99 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:19:02 -0400 Subject: [PATCH 132/138] btrfs: use btrfs_try_granting_tickets in update_global_rsv We have some annoying xfstests tests that will create a very small fs, fill it up, delete it, and repeat to make sure everything works right. This trips btrfs up sometimes because we may commit a transaction to free space, but most of the free metadata space was being reserved by the global reserve. So we commit and update the global reserve, but the space is simply added to bytes_may_use directly, instead of trying to add it to existing tickets. This results in ENOSPC when we really did have space. Fix this by calling btrfs_try_granting_tickets once we add back our excess space to wake any pending tickets. 
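Putting this together with the earlier block reserve changes in the series, the shrink branch of btrfs_update_global_block_rsv() ends up roughly as below (a sketch reconstructed from the diffs, not an extra change); returning the excess now also immediately offers it to any queued tickets:

	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}
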
Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/block-rsv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c index bea66b499e6f..d07bd41a7c1e 100644 --- a/fs/btrfs/block-rsv.c +++ b/fs/btrfs/block-rsv.c @@ -305,6 +305,7 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) btrfs_space_info_update_bytes_may_use(fs_info, sinfo, -num_bytes); block_rsv->reserved = block_rsv->size; + btrfs_try_granting_tickets(fs_info, sinfo); } if (block_rsv->reserved == block_rsv->size) From 0096420adb036b143d6c42ad7c02a945b3e1119c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:19:03 -0400 Subject: [PATCH 133/138] btrfs: do not account global reserve in can_overcommit We ran into a problem in production where a box with plenty of space was getting wedged doing ENOSPC flushing. These boxes only had 20% of the disk allocated, but their metadata space + global reserve was right at the size of their metadata chunk. In this case can_overcommit should be allowing allocations without problem, but there's logic in can_overcommit that doesn't allow us to overcommit if there's not enough real space to satisfy the global reserve. This is for historical reasons. Before there were only certain places we could allocate chunks. We could go to commit the transaction and not have enough space for our pending delayed refs and such and be unable to allocate a new chunk. This would result in a abort because of ENOSPC. This code was added to solve this problem. However since then we've gained the ability to always be able to allocate a chunk. So we can easily overcommit in these cases without risking a transaction abort because of ENOSPC. Also prior to now the global reserve really would be used because that's the space we relied on for delayed refs. With delayed refs being tracked separately we no longer have to worry about running out of delayed refs space while committing. We are much less likely to exhaust our global reserve space during transaction commit. Fix the can_overcommit code to simply see if our current usage + what we want is less than our current free space plus whatever slack space we have in the disk is. This solves the problem we were seeing in production and keeps us from flushing as aggressively as we approach our actual metadata size usage. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index af53c3344598..55f0c288b631 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -165,9 +165,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info, enum btrfs_reserve_flush_enum flush, bool system_chunk) { - struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; u64 profile; - u64 space_size; u64 avail; u64 used; int factor; @@ -181,22 +179,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info, else profile = btrfs_metadata_alloc_profile(fs_info); - used = btrfs_space_info_used(space_info, false); - - /* - * We only want to allow over committing if we have lots of actual space - * free, but if we don't have enough space to handle the global reserve - * space then we could end up having a real enospc problem when trying - * to allocate a chunk or some other such important allocation. 
- */ - spin_lock(&global_rsv->lock); - space_size = calc_global_rsv_need_space(global_rsv); - spin_unlock(&global_rsv->lock); - if (used + space_size >= space_info->total_bytes) - return 0; - - used += space_info->bytes_may_use; - + used = btrfs_space_info_used(space_info, true); avail = atomic64_read(&fs_info->free_chunk_space); /* From 84fe47a4be931d506670a1f126c477f5c17d3a76 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 22 Aug 2019 15:19:04 -0400 Subject: [PATCH 134/138] btrfs: add enospc debug messages for ticket failure When debugging weird enospc problems it's handy to be able to dump the space info when we wake up all tickets, and see what the ticket values are. This helped me figure out cases where we were enospc'ing when we shouldn't have been. Reviewed-by: Nikolay Borisov Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/space-info.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index 55f0c288b631..98dc092a905e 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -258,14 +258,11 @@ do { \ spin_unlock(&__rsv->lock); \ } while (0) -void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, - struct btrfs_space_info *info, u64 bytes, - int dump_block_groups) +static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *info) { - struct btrfs_block_group_cache *cache; - int index = 0; + lockdep_assert_held(&info->lock); - spin_lock(&info->lock); btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull", info->flags, info->total_bytes - btrfs_space_info_used(info, true), @@ -275,7 +272,6 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, info->total_bytes, info->bytes_used, info->bytes_pinned, info->bytes_reserved, info->bytes_may_use, info->bytes_readonly); - spin_unlock(&info->lock); DUMP_BLOCK_RSV(fs_info, global_block_rsv); DUMP_BLOCK_RSV(fs_info, trans_block_rsv); @@ -283,6 +279,19 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); +} + +void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *info, u64 bytes, + int dump_block_groups) +{ + struct btrfs_block_group_cache *cache; + int index = 0; + + spin_lock(&info->lock); + __btrfs_dump_space_info(fs_info, info); + spin_unlock(&info->lock); + if (!dump_block_groups) return; @@ -696,6 +705,11 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, u64 tickets_id = space_info->tickets_id; u64 first_ticket_bytes = 0; + if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { + btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); + __btrfs_dump_space_info(fs_info, space_info); + } + while (!list_empty(&space_info->tickets) && tickets_id == space_info->tickets_id) { ticket = list_first_entry(&space_info->tickets, @@ -716,6 +730,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, else if (first_ticket_bytes > ticket->bytes) return true; + if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) + btrfs_info(fs_info, "failing ticket with %llu bytes", + ticket->bytes); + list_del_init(&ticket->list); ticket->error = -ENOSPC; wake_up(&ticket->wait); From e35b79a1070d681b4842dad27b1edaf9811da7e9 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 30 Aug 2019 13:36:08 +0200 Subject: [PATCH 135/138] btrfs: turn checksum type define into an enum Turn the checksum type definition into a enum. 
This eases later addition of new checksums. Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/uapi/linux/btrfs_tree.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 71246c1941aa..b65c7ee75bc7 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -300,7 +300,9 @@ #define BTRFS_CSUM_SIZE 32 /* csum types */ -#define BTRFS_CSUM_TYPE_CRC32 0 +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, +}; /* * flags definitions for directory entry item type From af024ed2e0e56f27279cdba4d27a23dbb7677e40 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 30 Aug 2019 13:36:09 +0200 Subject: [PATCH 136/138] btrfs: create structure to encode checksum type and length Create a structure to encode the type and length for the known on-disk checksums. This makes it easier to add new checksums later. The structure and helpers are moved from ctree.h so they don't occupy space in all headers including ctree.h. This save some space in the final object. Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 22 ++++++++++++++++++++++ fs/btrfs/ctree.h | 20 ++------------------ 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 88c3b338508d..98f741c85905 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -29,6 +29,28 @@ static int balance_node_right(struct btrfs_trans_handle *trans, static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, int level, int slot); +static const struct btrfs_csums { + u16 size; + const char *name; +} btrfs_csums[] = { + [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" }, +}; + +int btrfs_super_csum_size(const struct btrfs_super_block *s) +{ + u16 t = btrfs_super_csum_type(s); + /* + * csum type is validated at mount time + */ + return btrfs_csums[t].size; +} + +const char *btrfs_super_csum_name(u16 csum_type) +{ + /* csum type is validated at mount time */ + return btrfs_csums[csum_type].name; +} + struct btrfs_path *btrfs_alloc_path(void) { return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 033a0d5d1789..19d669d12ca1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -83,10 +83,6 @@ struct btrfs_ref; */ #define BTRFS_LINK_MAX 65535U -/* four bytes for CRC32 */ -static const int btrfs_csum_sizes[] = { 4 }; -static const char *btrfs_csum_names[] = { "crc32c" }; - #define BTRFS_EMPTY_DIR_SIZE 0 /* ioprio of readahead is set to idle */ @@ -2167,20 +2163,8 @@ BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, uuid_tree_generation, 64); -static inline int btrfs_super_csum_size(const struct btrfs_super_block *s) -{ - u16 t = btrfs_super_csum_type(s); - /* - * csum type is validated at mount time - */ - return btrfs_csum_sizes[t]; -} - -static inline const char *btrfs_super_csum_name(u16 csum_type) -{ - /* csum type is validated at mount time */ - return btrfs_csum_names[csum_type]; -} +int btrfs_super_csum_size(const struct btrfs_super_block *s); +const char *btrfs_super_csum_name(u16 csum_type); /* * The leaf data grows from end-to-front in the node. 
From 65e99c43e9c2fee1a1f02c100154730fbeae9717 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 4 Sep 2019 20:22:39 +0300 Subject: [PATCH 137/138] btrfs: Don't assign retval of btrfs_try_tree_write_lock/btrfs_tree_read_lock_atomic Those function are simple boolean predicates there is no need to assign their return values to interim variables. Use them directly as predicates. No functional changes. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 98f741c85905..e59cde204b2f 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2913,15 +2913,13 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (!p->skip_locking) { level = btrfs_header_level(b); if (level <= write_lock_level) { - err = btrfs_try_tree_write_lock(b); - if (!err) { + if (!btrfs_try_tree_write_lock(b)) { btrfs_set_path_blocking(p); btrfs_tree_lock(b); } p->locks[level] = BTRFS_WRITE_LOCK; } else { - err = btrfs_tree_read_lock_atomic(b); - if (!err) { + if (!btrfs_tree_read_lock_atomic(b)) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); } @@ -3055,8 +3053,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, } level = btrfs_header_level(b); - err = btrfs_tree_read_lock_atomic(b); - if (!err) { + if (!btrfs_tree_read_lock_atomic(b)) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); } From 6af112b11a4bc1b560f60a618ac9c1dcefe9836e Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 4 Sep 2019 19:33:58 +0300 Subject: [PATCH 138/138] btrfs: Relinquish CPUs in btrfs_compare_trees When doing any form of incremental send the parent and the child trees need to be compared via btrfs_compare_trees. This can result in long loop chains without ever relinquishing the CPU. This causes softlockup detector to trigger when comparing trees with a lot of items. Example report: watchdog: BUG: soft lockup - CPU#0 stuck for 24s! [snapperd:16153] CPU: 0 PID: 16153 Comm: snapperd Not tainted 5.2.9-1-default #1 openSUSE Tumbleweed (unreleased) Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 pstate: 40000005 (nZcv daif -PAN -UAO) pc : __ll_sc_arch_atomic_sub_return+0x14/0x20 lr : btrfs_release_extent_buffer_pages+0xe0/0x1e8 [btrfs] sp : ffff00001273b7e0 Call trace: __ll_sc_arch_atomic_sub_return+0x14/0x20 release_extent_buffer+0xdc/0x120 [btrfs] free_extent_buffer.part.0+0xb0/0x118 [btrfs] free_extent_buffer+0x24/0x30 [btrfs] btrfs_release_path+0x4c/0xa0 [btrfs] btrfs_free_path.part.0+0x20/0x40 [btrfs] btrfs_free_path+0x24/0x30 [btrfs] get_inode_info+0xa8/0xf8 [btrfs] finish_inode_if_needed+0xe0/0x6d8 [btrfs] changed_cb+0x9c/0x410 [btrfs] btrfs_compare_trees+0x284/0x648 [btrfs] send_subvol+0x33c/0x520 [btrfs] btrfs_ioctl_send+0x8a0/0xaf0 [btrfs] btrfs_ioctl+0x199c/0x2288 [btrfs] do_vfs_ioctl+0x4b0/0x820 ksys_ioctl+0x84/0xb8 __arm64_sys_ioctl+0x28/0x38 el0_svc_common.constprop.0+0x7c/0x188 el0_svc_handler+0x34/0x90 el0_svc+0x8/0xc Fix this by adding a call to cond_resched at the beginning of the main loop in btrfs_compare_trees. 
Fixes: 7069830a9e38 ("Btrfs: add btrfs_compare_trees function") CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Johannes Thumshirn Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/send.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index f856d6ca3771..f3215028235c 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -6757,6 +6757,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root, advance_left = advance_right = 0; while (1) { + cond_resched(); if (advance_left && !left_end_reached) { ret = tree_advance(left_path, &left_level, left_root_level,