Btrfs: take into account total references when doing backref lookup

I added an optimization for large files where we would stop searching for
backrefs once we had looked at the number of references we currently had for
this extent.  This works great most of the time, but for snapshots that point to
this extent and have changes in the original root, this assumption falls on its
face.  So keep track of any delayed ref mods made, add in the actual ref count
as reported by the extent item, and use that to limit how far down an inode
we'll search for extents.  Thanks,

Reported-by: Hugo Mills <hugo@carfax.org.uk>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Tested-by: Hugo Mills <hugo@carfax.org.uk>
Signed-off-by: Chris Mason <clm@fb.com>
commit 4485386853 (parent bfa7e1f8be)
Author:    Josef Bacik <jbacik@fb.com>
Date:      2014-03-19 13:35:14 -04:00
Committer: Chris Mason <clm@fb.com>
1 changed file with 18 additions and 11 deletions


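In short, find_parent_nodes() now carries a running total_refs: __add_inline_refs() seeds it from the extent item's on-disk refcount, __add_delayed_refs() folds in any pending delayed-ref modifications, and add_all_parents() bounds its search by that total instead of by the per-root ref->count, so a snapshot's reference that a single root does not yet see can no longer cut the walk short. Below is a minimal standalone sketch of that accounting (simplified, userspace-style types; illustrative only, not the kernel code), followed by the actual diff.

/*
 * Illustrative sketch only -- simplified types, not the kernel implementation.
 * total_refs = refcount from the extent item plus the net effect of any
 * pending delayed refs; the backref walk stops once it has accounted for
 * that many references.
 */
#include <stdint.h>
#include <stddef.h>

struct delayed_mod {            /* stand-in for a pending delayed-ref node */
        int ref_mod;            /* refs added/removed by this pending op   */
        int sgn;                /* +1 for an add, -1 for a drop            */
};

uint64_t compute_total_refs(uint64_t extent_item_refs,
                            const struct delayed_mod *mods, size_t n)
{
        /* seeded from btrfs_extent_refs(leaf, ei) in __add_inline_refs() */
        uint64_t total_refs = extent_item_refs;
        size_t i;

        /*
         * Mirrors the "*total_refs += (node->ref_mod * sgn)" accounting
         * done while walking the delayed refs in __add_delayed_refs().
         */
        for (i = 0; i < n; i++)
                total_refs += mods[i].ref_mod * mods[i].sgn;

        /* used by add_all_parents(): while (!ret && count < total_refs) */
        return total_refs;
}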
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -220,7 +220,8 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
                            struct ulist *parents, struct __prelim_ref *ref,
-                           int level, u64 time_seq, const u64 *extent_item_pos)
+                           int level, u64 time_seq, const u64 *extent_item_pos,
+                           u64 total_refs)
 {
         int ret = 0;
         int slot;
@@ -249,7 +250,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
                 ret = btrfs_next_old_leaf(root, path, time_seq);
-        while (!ret && count < ref->count) {
+        while (!ret && count < total_refs) {
                 eb = path->nodes[0];
                 slot = path->slots[0];
@@ -306,7 +307,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                                   struct btrfs_path *path, u64 time_seq,
                                   struct __prelim_ref *ref,
                                   struct ulist *parents,
-                                  const u64 *extent_item_pos)
+                                  const u64 *extent_item_pos, u64 total_refs)
 {
         struct btrfs_root *root;
         struct btrfs_key root_key;
@@ -361,7 +362,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
         }
         ret = add_all_parents(root, path, parents, ref, level, time_seq,
-                              extent_item_pos);
+                              extent_item_pos, total_refs);
 out:
         path->lowest_level = 0;
         btrfs_release_path(path);
@@ -374,7 +375,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                                    struct btrfs_path *path, u64 time_seq,
                                    struct list_head *head,
-                                   const u64 *extent_item_pos)
+                                   const u64 *extent_item_pos, u64 total_refs)
 {
         int err;
         int ret = 0;
@@ -400,7 +401,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                 if (ref->count == 0)
                         continue;
                 err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
-                                             parents, extent_item_pos);
+                                             parents, extent_item_pos,
+                                             total_refs);
                 /*
                  * we can only tolerate ENOENT,otherwise,we should catch error
                  * and return directly.
@@ -557,7 +559,7 @@ static void __merge_refs(struct list_head *head, int mode)
  * smaller or equal that seq to the list
  */
 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
-                              struct list_head *prefs)
+                              struct list_head *prefs, u64 *total_refs)
 {
         struct btrfs_delayed_extent_op *extent_op = head->extent_op;
         struct rb_node *n = &head->node.rb_node;
@@ -593,6 +595,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                 default:
                         BUG_ON(1);
                 }
+                *total_refs += (node->ref_mod * sgn);
                 switch (node->type) {
                 case BTRFS_TREE_BLOCK_REF_KEY: {
                         struct btrfs_delayed_tree_ref *ref;
@@ -653,7 +656,8 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
  */
 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
                              struct btrfs_path *path, u64 bytenr,
-                             int *info_level, struct list_head *prefs)
+                             int *info_level, struct list_head *prefs,
+                             u64 *total_refs)
 {
         int ret = 0;
         int slot;
@@ -677,6 +681,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
         ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
         flags = btrfs_extent_flags(leaf, ei);
+        *total_refs += btrfs_extent_refs(leaf, ei);
         btrfs_item_key_to_cpu(leaf, &found_key, slot);
         ptr = (unsigned long)(ei + 1);
@@ -859,6 +864,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
         struct list_head prefs;
         struct __prelim_ref *ref;
         struct extent_inode_elem *eie = NULL;
+        u64 total_refs = 0;
         INIT_LIST_HEAD(&prefs);
         INIT_LIST_HEAD(&prefs_delayed);
@@ -917,7 +923,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
                         }
                         spin_unlock(&delayed_refs->lock);
                         ret = __add_delayed_refs(head, time_seq,
-                                                 &prefs_delayed);
+                                                 &prefs_delayed, &total_refs);
                         mutex_unlock(&head->mutex);
                         if (ret)
                                 goto out;
@@ -938,7 +944,8 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
                     (key.type == BTRFS_EXTENT_ITEM_KEY ||
                      key.type == BTRFS_METADATA_ITEM_KEY)) {
                         ret = __add_inline_refs(fs_info, path, bytenr,
-                                                &info_level, &prefs);
+                                                &info_level, &prefs,
+                                                &total_refs);
                         if (ret)
                                 goto out;
                         ret = __add_keyed_refs(fs_info, path, bytenr,
@@ -958,7 +965,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
         __merge_refs(&prefs, 1);
         ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
-                                      extent_item_pos);
+                                      extent_item_pos, total_refs);
         if (ret)
                 goto out;