btrfs: migrate the block group space accounting helpers
We can now easily migrate this code as well.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ade4b5169f
commit 606d1bf10d
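For orientation, the patch moves three space accounting helpers in one piece: btrfs_update_block_group(), btrfs_add_reserved_bytes() and btrfs_free_reserved_bytes(). A minimal sketch of their prototypes, copied from the hunks below; the assumption that the matching declarations move into the block group header (fs/btrfs/block-group.h) is mine and is not shown in this excerpt:

/* Sketch only: declarations assumed to move alongside the definitions. */
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
			       u64 num_bytes, int delalloc);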
@@ -2519,3 +2519,178 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
 	btrfs_free_path(path);
 	return ret;
 }
+
+int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+			     u64 bytenr, u64 num_bytes, int alloc)
+{
+	struct btrfs_fs_info *info = trans->fs_info;
+	struct btrfs_block_group_cache *cache = NULL;
+	u64 total = num_bytes;
+	u64 old_val;
+	u64 byte_in_group;
+	int factor;
+	int ret = 0;
+
+	/* Block accounting for super block */
+	spin_lock(&info->delalloc_root_lock);
+	old_val = btrfs_super_bytes_used(info->super_copy);
+	if (alloc)
+		old_val += num_bytes;
+	else
+		old_val -= num_bytes;
+	btrfs_set_super_bytes_used(info->super_copy, old_val);
+	spin_unlock(&info->delalloc_root_lock);
+
+	while (total) {
+		cache = btrfs_lookup_block_group(info, bytenr);
+		if (!cache) {
+			ret = -ENOENT;
+			break;
+		}
+		factor = btrfs_bg_type_to_factor(cache->flags);
+
+		/*
+		 * If this block group has free space cache written out, we
+		 * need to make sure to load it if we are removing space. This
+		 * is because we need the unpinning stage to actually add the
+		 * space back to the block group, otherwise we will leak space.
+		 */
+		if (!alloc && cache->cached == BTRFS_CACHE_NO)
+			btrfs_cache_block_group(cache, 1);
+
+		byte_in_group = bytenr - cache->key.objectid;
+		WARN_ON(byte_in_group > cache->key.offset);
+
+		spin_lock(&cache->space_info->lock);
+		spin_lock(&cache->lock);
+
+		if (btrfs_test_opt(info, SPACE_CACHE) &&
+		    cache->disk_cache_state < BTRFS_DC_CLEAR)
+			cache->disk_cache_state = BTRFS_DC_CLEAR;
+
+		old_val = btrfs_block_group_used(&cache->item);
+		num_bytes = min(total, cache->key.offset - byte_in_group);
+		if (alloc) {
+			old_val += num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			cache->reserved -= num_bytes;
+			cache->space_info->bytes_reserved -= num_bytes;
+			cache->space_info->bytes_used += num_bytes;
+			cache->space_info->disk_used += num_bytes * factor;
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+		} else {
+			old_val -= num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			cache->pinned += num_bytes;
+			btrfs_space_info_update_bytes_pinned(info,
+					cache->space_info, num_bytes);
+			cache->space_info->bytes_used -= num_bytes;
+			cache->space_info->disk_used -= num_bytes * factor;
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
+
+			trace_btrfs_space_reservation(info, "pinned",
+						      cache->space_info->flags,
+						      num_bytes, 1);
+			percpu_counter_add_batch(
+					&cache->space_info->total_bytes_pinned,
+					num_bytes,
+					BTRFS_TOTAL_BYTES_PINNED_BATCH);
+			set_extent_dirty(info->pinned_extents,
+					 bytenr, bytenr + num_bytes - 1,
+					 GFP_NOFS | __GFP_NOFAIL);
+		}
+
+		spin_lock(&trans->transaction->dirty_bgs_lock);
+		if (list_empty(&cache->dirty_list)) {
+			list_add_tail(&cache->dirty_list,
+				      &trans->transaction->dirty_bgs);
+			trans->delayed_ref_updates++;
+			btrfs_get_block_group(cache);
+		}
+		spin_unlock(&trans->transaction->dirty_bgs_lock);
+
+		/*
+		 * No longer have used bytes in this block group, queue it for
+		 * deletion. We do this after adding the block group to the
+		 * dirty list to avoid races between cleaner kthread and space
+		 * cache writeout.
+		 */
+		if (!alloc && old_val == 0)
+			btrfs_mark_bg_unused(cache);
+
+		btrfs_put_block_group(cache);
+		total -= num_bytes;
+		bytenr += num_bytes;
+	}
+
+	/* Modified block groups are accounted for in the delayed_refs_rsv. */
+	btrfs_update_delayed_refs_rsv(trans);
+	return ret;
+}
+
+/**
+ * btrfs_add_reserved_bytes - update the block_group and space info counters
+ * @cache:	The cache we are manipulating
+ * @ram_bytes:	The number of bytes of file content, and will be same to
+ *		@num_bytes except for the compress path.
+ * @num_bytes:	The number of bytes in question
+ * @delalloc:	The blocks are allocated for the delalloc write
+ *
+ * This is called by the allocator when it reserves space. If this is a
+ * reservation and the block group has become read only we cannot make the
+ * reservation and return -EAGAIN, otherwise this function always succeeds.
+ */
+int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+			     u64 ram_bytes, u64 num_bytes, int delalloc)
+{
+	struct btrfs_space_info *space_info = cache->space_info;
+	int ret = 0;
+
+	spin_lock(&space_info->lock);
+	spin_lock(&cache->lock);
+	if (cache->ro) {
+		ret = -EAGAIN;
+	} else {
+		cache->reserved += num_bytes;
+		space_info->bytes_reserved += num_bytes;
+		btrfs_space_info_update_bytes_may_use(cache->fs_info,
+						      space_info, -ram_bytes);
+		if (delalloc)
+			cache->delalloc_bytes += num_bytes;
+	}
+	spin_unlock(&cache->lock);
+	spin_unlock(&space_info->lock);
+	return ret;
+}
+
+/**
+ * btrfs_free_reserved_bytes - update the block_group and space info counters
+ * @cache:	The cache we are manipulating
+ * @num_bytes:	The number of bytes in question
+ * @delalloc:	The blocks are allocated for the delalloc write
+ *
+ * This is called by somebody who is freeing space that was never actually used
+ * on disk. For example if you reserve some space for a new leaf in transaction
+ * A and before transaction A commits you free that leaf, you call this with
+ * reserve set to 0 in order to clear the reservation.
+ */
+void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+			       u64 num_bytes, int delalloc)
+{
+	struct btrfs_space_info *space_info = cache->space_info;
+
+	spin_lock(&space_info->lock);
+	spin_lock(&cache->lock);
+	if (cache->ro)
+		space_info->bytes_readonly += num_bytes;
+	cache->reserved -= num_bytes;
+	space_info->bytes_reserved -= num_bytes;
+	space_info->max_extent_size = 0;
+
+	if (delalloc)
+		cache->delalloc_bytes -= num_bytes;
+	spin_unlock(&cache->lock);
+	spin_unlock(&space_info->lock);
+}
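As the kernel-doc above notes, btrfs_add_reserved_bytes() fails with -EAGAIN once the block group has gone read only, and btrfs_free_reserved_bytes() undoes a reservation that was never actually used on disk. A hypothetical caller, for illustration only (not a call site touched by this patch), might pair the two helpers like this:

/* Hypothetical illustration of the reserve/release pairing. */
static int example_try_reservation(struct btrfs_block_group_cache *cache,
				   u64 num_bytes)
{
	int ret;

	/* ram_bytes == num_bytes outside of the compression path. */
	ret = btrfs_add_reserved_bytes(cache, num_bytes, num_bytes, 0);
	if (ret)
		return ret;	/* -EAGAIN: block group became read only */

	/*
	 * If the allocation is abandoned before the space is really used on
	 * disk, give the reservation back to the space_info counters.
	 */
	btrfs_free_reserved_bytes(cache, num_bytes, 0);
	return 0;
}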
@@ -2898,115 +2898,6 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 	return ret;
 }
 
-int btrfs_update_block_group(struct btrfs_trans_handle *trans,
-			     u64 bytenr, u64 num_bytes, int alloc)
-{
-	struct btrfs_fs_info *info = trans->fs_info;
-	struct btrfs_block_group_cache *cache = NULL;
-	u64 total = num_bytes;
-	u64 old_val;
-	u64 byte_in_group;
-	int factor;
-	int ret = 0;
-
-	/* block accounting for super block */
-	spin_lock(&info->delalloc_root_lock);
-	old_val = btrfs_super_bytes_used(info->super_copy);
-	if (alloc)
-		old_val += num_bytes;
-	else
-		old_val -= num_bytes;
-	btrfs_set_super_bytes_used(info->super_copy, old_val);
-	spin_unlock(&info->delalloc_root_lock);
-
-	while (total) {
-		cache = btrfs_lookup_block_group(info, bytenr);
-		if (!cache) {
-			ret = -ENOENT;
-			break;
-		}
-		factor = btrfs_bg_type_to_factor(cache->flags);
-
-		/*
-		 * If this block group has free space cache written out, we
-		 * need to make sure to load it if we are removing space. This
-		 * is because we need the unpinning stage to actually add the
-		 * space back to the block group, otherwise we will leak space.
-		 */
-		if (!alloc && cache->cached == BTRFS_CACHE_NO)
-			btrfs_cache_block_group(cache, 1);
-
-		byte_in_group = bytenr - cache->key.objectid;
-		WARN_ON(byte_in_group > cache->key.offset);
-
-		spin_lock(&cache->space_info->lock);
-		spin_lock(&cache->lock);
-
-		if (btrfs_test_opt(info, SPACE_CACHE) &&
-		    cache->disk_cache_state < BTRFS_DC_CLEAR)
-			cache->disk_cache_state = BTRFS_DC_CLEAR;
-
-		old_val = btrfs_block_group_used(&cache->item);
-		num_bytes = min(total, cache->key.offset - byte_in_group);
-		if (alloc) {
-			old_val += num_bytes;
-			btrfs_set_block_group_used(&cache->item, old_val);
-			cache->reserved -= num_bytes;
-			cache->space_info->bytes_reserved -= num_bytes;
-			cache->space_info->bytes_used += num_bytes;
-			cache->space_info->disk_used += num_bytes * factor;
-			spin_unlock(&cache->lock);
-			spin_unlock(&cache->space_info->lock);
-		} else {
-			old_val -= num_bytes;
-			btrfs_set_block_group_used(&cache->item, old_val);
-			cache->pinned += num_bytes;
-			btrfs_space_info_update_bytes_pinned(info,
-					cache->space_info, num_bytes);
-			cache->space_info->bytes_used -= num_bytes;
-			cache->space_info->disk_used -= num_bytes * factor;
-			spin_unlock(&cache->lock);
-			spin_unlock(&cache->space_info->lock);
-
-			trace_btrfs_space_reservation(info, "pinned",
-						      cache->space_info->flags,
-						      num_bytes, 1);
-			percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
-						 num_bytes,
-						 BTRFS_TOTAL_BYTES_PINNED_BATCH);
-			set_extent_dirty(info->pinned_extents,
-					 bytenr, bytenr + num_bytes - 1,
-					 GFP_NOFS | __GFP_NOFAIL);
-		}
-
-		spin_lock(&trans->transaction->dirty_bgs_lock);
-		if (list_empty(&cache->dirty_list)) {
-			list_add_tail(&cache->dirty_list,
-				      &trans->transaction->dirty_bgs);
-			trans->delayed_ref_updates++;
-			btrfs_get_block_group(cache);
-		}
-		spin_unlock(&trans->transaction->dirty_bgs_lock);
-
-		/*
-		 * No longer have used bytes in this block group, queue it for
-		 * deletion. We do this after adding the block group to the
-		 * dirty list to avoid races between cleaner kthread and space
-		 * cache writeout.
-		 */
-		if (!alloc && old_val == 0)
-			btrfs_mark_bg_unused(cache);
-
-		btrfs_put_block_group(cache);
-		total -= num_bytes;
-		bytenr += num_bytes;
-	}
-
-	/* Modified block groups are accounted for in the delayed_refs_rsv. */
-	btrfs_update_delayed_refs_rsv(trans);
-	return ret;
-}
-
 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
 {
 	struct btrfs_block_group_cache *cache;
@@ -3187,70 +3078,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
 	atomic_inc(&bg->reservations);
 }
 
-/**
- * btrfs_add_reserved_bytes - update the block_group and space info counters
- * @cache:	The cache we are manipulating
- * @ram_bytes:	The number of bytes of file content, and will be same to
- *		@num_bytes except for the compress path.
- * @num_bytes:	The number of bytes in question
- * @delalloc:	The blocks are allocated for the delalloc write
- *
- * This is called by the allocator when it reserves space. If this is a
- * reservation and the block group has become read only we cannot make the
- * reservation and return -EAGAIN, otherwise this function always succeeds.
- */
-int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
-			     u64 ram_bytes, u64 num_bytes, int delalloc)
-{
-	struct btrfs_space_info *space_info = cache->space_info;
-	int ret = 0;
-
-	spin_lock(&space_info->lock);
-	spin_lock(&cache->lock);
-	if (cache->ro) {
-		ret = -EAGAIN;
-	} else {
-		cache->reserved += num_bytes;
-		space_info->bytes_reserved += num_bytes;
-		btrfs_space_info_update_bytes_may_use(cache->fs_info,
-						      space_info, -ram_bytes);
-		if (delalloc)
-			cache->delalloc_bytes += num_bytes;
-	}
-	spin_unlock(&cache->lock);
-	spin_unlock(&space_info->lock);
-	return ret;
-}
-
-/**
- * btrfs_free_reserved_bytes - update the block_group and space info counters
- * @cache:	The cache we are manipulating
- * @num_bytes:	The number of bytes in question
- * @delalloc:	The blocks are allocated for the delalloc write
- *
- * This is called by somebody who is freeing space that was never actually used
- * on disk. For example if you reserve some space for a new leaf in transaction
- * A and before transaction A commits you free that leaf, you call this with
- * reserve set to 0 in order to clear the reservation.
- */
-void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
-			       u64 num_bytes, int delalloc)
-{
-	struct btrfs_space_info *space_info = cache->space_info;
-
-	spin_lock(&space_info->lock);
-	spin_lock(&cache->lock);
-	if (cache->ro)
-		space_info->bytes_readonly += num_bytes;
-	cache->reserved -= num_bytes;
-	space_info->bytes_reserved -= num_bytes;
-	space_info->max_extent_size = 0;
-
-	if (delalloc)
-		cache->delalloc_bytes -= num_bytes;
-	spin_unlock(&cache->lock);
-	spin_unlock(&space_info->lock);
-}
-
 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_caching_control *next;