Btrfs: refactor caching_thread()

We're also going to load the free space tree from caching_thread(), so
we should refactor some of the common code.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
Authored by Omar Sandoval on 2015-09-29 20:50:33 -07:00; committed by Chris Mason
parent 1abfbcdf56
commit 73fa48b674
2 changed files with 36 additions and 26 deletions
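
For orientation, this is the shape the hunks below produce, condensed into a sketch (not the verbatim kernel source): load_extent_tree_free() now owns the extent tree scan and returns an int, while caching_thread() keeps the lock handling and turns that return value into the block group's cache state.

/* Condensed from the diff below; the scan loop and error paths are elided. */
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;	/* no more presetting ret = -ENOMEM and jumping to out */

	/* ... walk the extent tree, feeding add_new_free_space(); the scan sets ret ... */

	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info->extent_root, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);
	put_caching_control(caching_ctl);
	/* remaining cleanup is unchanged by this commit */
}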

fs/btrfs/ctree.h

@@ -1262,6 +1262,9 @@ struct btrfs_caching_control {
atomic_t count;
};
/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)
struct btrfs_io_ctl {
void *cur, *orig;
struct page *page;
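
The new constant names the 2 MiB threshold that caching_thread() previously open-coded as 1024 * 1024 * 2. A minimal sketch of the pattern behind the comment: the wake-up check is taken from the hunk further down, while the waiter side is only an assumption about how other extent-tree.c code sleeps on caching_ctl->wait and is not part of this commit.

/* Producer, as in the hunk further down: wake waiters every ~2 MiB found. */
if (total_found > CACHING_CTL_WAKE_UP) {
	total_found = 0;
	wake_up(&caching_ctl->wait);
}

/*
 * Waiter (assumed, not part of this diff): sleep until caching is done or
 * enough free space has been cached for the pending allocation.
 */
wait_event(caching_ctl->wait, block_group_cache_done(block_group) ||
	   block_group->free_space_ctl->free_space >= num_bytes);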

fs/btrfs/extent-tree.c

@@ -375,11 +375,10 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
return total_added;
}
static noinline void caching_thread(struct btrfs_work *work)
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_caching_control *caching_ctl;
struct btrfs_root *extent_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -387,16 +386,15 @@ static noinline void caching_thread(struct btrfs_work *work)
u64 total_found = 0;
u64 last = 0;
u32 nritems;
int ret = -ENOMEM;
int ret;
caching_ctl = container_of(work, struct btrfs_caching_control, work);
block_group = caching_ctl->block_group;
fs_info = block_group->fs_info;
extent_root = fs_info->extent_root;
path = btrfs_alloc_path();
if (!path)
goto out;
return -ENOMEM;
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
@@ -413,15 +411,11 @@ static noinline void caching_thread(struct btrfs_work *work)
key.objectid = last;
key.offset = 0;
key.type = BTRFS_EXTENT_ITEM_KEY;
again:
mutex_lock(&caching_ctl->mutex);
/* need to make sure the commit_root doesn't disappear */
down_read(&fs_info->commit_root_sem);
next:
ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
if (ret < 0)
goto err;
goto out;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
@@ -446,12 +440,14 @@ static noinline void caching_thread(struct btrfs_work *work)
up_read(&fs_info->commit_root_sem);
mutex_unlock(&caching_ctl->mutex);
cond_resched();
goto again;
mutex_lock(&caching_ctl->mutex);
down_read(&fs_info->commit_root_sem);
goto next;
}
ret = btrfs_next_leaf(extent_root, path);
if (ret < 0)
goto err;
goto out;
if (ret)
break;
leaf = path->nodes[0];
@@ -489,7 +485,7 @@ static noinline void caching_thread(struct btrfs_work *work)
else
last = key.objectid + key.offset;
if (total_found > (1024 * 1024 * 2)) {
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);
}
@@ -503,25 +499,36 @@ static noinline void caching_thread(struct btrfs_work *work)
block_group->key.offset);
caching_ctl->progress = (u64)-1;
out:
btrfs_free_path(path);
return ret;
}
static noinline void caching_thread(struct btrfs_work *work)
{
struct btrfs_block_group_cache *block_group;
struct btrfs_fs_info *fs_info;
struct btrfs_caching_control *caching_ctl;
int ret;
caching_ctl = container_of(work, struct btrfs_caching_control, work);
block_group = caching_ctl->block_group;
fs_info = block_group->fs_info;
mutex_lock(&caching_ctl->mutex);
down_read(&fs_info->commit_root_sem);
ret = load_extent_tree_free(caching_ctl);
spin_lock(&block_group->lock);
block_group->caching_ctl = NULL;
block_group->cached = BTRFS_CACHE_FINISHED;
block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
spin_unlock(&block_group->lock);
err:
btrfs_free_path(path);
up_read(&fs_info->commit_root_sem);
free_excluded_extents(extent_root, block_group);
free_excluded_extents(fs_info->extent_root, block_group);
mutex_unlock(&caching_ctl->mutex);
out:
if (ret) {
spin_lock(&block_group->lock);
block_group->caching_ctl = NULL;
block_group->cached = BTRFS_CACHE_ERROR;
spin_unlock(&block_group->lock);
}
wake_up(&caching_ctl->wait);
put_caching_control(caching_ctl);
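
Per the commit message, the point of the int-returning loader is that caching_thread() can later load the free space tree instead of scanning the extent tree. A hypothetical illustration of how that could slot in; load_free_space_tree() and the feature check are assumptions here, not part of this commit, and the actual follow-up series may differ.

static noinline void caching_thread(struct btrfs_work *work)
{
	/* declarations, container_of() and locking as in the hunk above */

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);	/* hypothetical loader */
	else
		ret = load_extent_tree_free(caching_ctl);

	/* cache-state update, unlocking and wakeup as in the hunk above */
}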