btrfs: move dump_space_info to space-info.c
We'll need this exported so we can use it in all the various ways we need to use it. This is prep work to move reserve_metadata_bytes.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 5da6afeb32
parent c2a67a76ec
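The change follows the usual pattern for promoting a file-local helper to a shared interface: the static definition and its forward declaration are deleted from extent-tree.c, the function is re-added in space-info.c under the exported name btrfs_dump_space_info(), a prototype is added to space-info.h, and every caller is switched to the new name. As a rough illustration of that pattern only (a userspace sketch with made-up demo_* names, not code from the kernel tree):

/* demo.h -- analogue of space-info.h: the declaration other files include */
#include <stdio.h>

struct demo_space_info {
	unsigned long long total_bytes;
	unsigned long long bytes_used;
};

/* formerly a static helper in a single .c file; now visible to all callers */
void demo_dump_space_info(const struct demo_space_info *info);

/* demo.c -- analogue of space-info.c: the moved definition */
void demo_dump_space_info(const struct demo_space_info *info)
{
	printf("space_info total=%llu used=%llu free=%llu\n",
	       info->total_bytes, info->bytes_used,
	       info->total_bytes - info->bytes_used);
}

/* caller.c -- analogue of extent-tree.c: a former user of the static helper */
int main(void)
{
	struct demo_space_info si = { .total_bytes = 1024, .bytes_used = 640 };

	demo_dump_space_info(&si);	/* was a call to the local static version */
	return 0;
}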
fs/btrfs/extent-tree.c

@@ -50,9 +50,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 				      struct btrfs_delayed_extent_op *extent_op);
 static int find_next_key(struct btrfs_path *path, int level,
 			 struct btrfs_key *key);
-static void dump_space_info(struct btrfs_fs_info *fs_info,
-			    struct btrfs_space_info *info, u64 bytes,
-			    int dump_block_groups);
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -4196,7 +4193,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
 	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
 		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
 			   left, thresh, type);
-		dump_space_info(fs_info, info, 0, 0);
+		btrfs_dump_space_info(fs_info, info, 0, 0);
 	}
 
 	if (left < thresh) {
@@ -5038,8 +5035,8 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
 					      orig_bytes, 1);
 
 		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
-			dump_space_info(fs_info, block_rsv->space_info,
-					orig_bytes, 0);
+			btrfs_dump_space_info(fs_info, block_rsv->space_info,
+					      orig_bytes, 0);
 	}
 	return ret;
 }
@@ -7644,60 +7641,6 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 	return ret;
 }
 
-#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
-do {									\
-	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
-	spin_lock(&__rsv->lock);					\
-	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
-		   __rsv->size, __rsv->reserved);			\
-	spin_unlock(&__rsv->lock);					\
-} while (0)
-
-static void dump_space_info(struct btrfs_fs_info *fs_info,
-			    struct btrfs_space_info *info, u64 bytes,
-			    int dump_block_groups)
-{
-	struct btrfs_block_group_cache *cache;
-	int index = 0;
-
-	spin_lock(&info->lock);
-	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
-		   info->flags,
-		   info->total_bytes - btrfs_space_info_used(info, true),
-		   info->full ? "" : "not ");
-	btrfs_info(fs_info,
-		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
-		info->total_bytes, info->bytes_used, info->bytes_pinned,
-		info->bytes_reserved, info->bytes_may_use,
-		info->bytes_readonly);
-	spin_unlock(&info->lock);
-
-	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
-	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
-	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
-	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
-	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
-
-	if (!dump_block_groups)
-		return;
-
-	down_read(&info->groups_sem);
-again:
-	list_for_each_entry(cache, &info->block_groups[index], list) {
-		spin_lock(&cache->lock);
-		btrfs_info(fs_info,
-			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
-			cache->key.objectid, cache->key.offset,
-			btrfs_block_group_used(&cache->item), cache->pinned,
-			cache->reserved, cache->ro ? "[readonly]" : "");
-		btrfs_dump_free_space(cache, bytes);
-		spin_unlock(&cache->lock);
-	}
-	if (++index < BTRFS_NR_RAID_TYPES)
-		goto again;
-	up_read(&info->groups_sem);
-}
-
 /*
  * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
  * hole that is at least as big as @num_bytes.
@@ -7778,7 +7721,8 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 				  "allocation failed flags %llu, wanted %llu",
 				  flags, num_bytes);
 			if (sinfo)
-				dump_space_info(fs_info, sinfo, num_bytes, 1);
+				btrfs_dump_space_info(fs_info, sinfo,
+						      num_bytes, 1);
 		}
 	}
@@ -9295,7 +9239,7 @@ static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 		btrfs_info(cache->fs_info,
 			"sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
 			sinfo_used, num_bytes, min_allocable_bytes);
-		dump_space_info(cache->fs_info, cache->space_info, 0, 0);
+		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
 	}
 	return ret;
 }
@@ -9776,7 +9720,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		if (WARN_ON(space_info->bytes_pinned > 0 ||
 			    space_info->bytes_reserved > 0 ||
 			    space_info->bytes_may_use > 0))
-			dump_space_info(info, space_info, 0, 0);
+			btrfs_dump_space_info(info, space_info, 0, 0);
 		list_del(&space_info->list);
 		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
 			struct kobject *kobj;
fs/btrfs/space-info.c

@@ -4,6 +4,7 @@
 #include "space-info.h"
 #include "sysfs.h"
 #include "volumes.h"
+#include "free-space-cache.h"
 
 u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
 			  bool may_use_included)
@@ -346,3 +347,57 @@ void btrfs_space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
 		goto again;
 	}
 }
+
+#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
+do {									\
+	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
+	spin_lock(&__rsv->lock);					\
+	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
+		   __rsv->size, __rsv->reserved);			\
+	spin_unlock(&__rsv->lock);					\
+} while (0)
+
+void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
+			   struct btrfs_space_info *info, u64 bytes,
+			   int dump_block_groups)
+{
+	struct btrfs_block_group_cache *cache;
+	int index = 0;
+
+	spin_lock(&info->lock);
+	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+		   info->flags,
+		   info->total_bytes - btrfs_space_info_used(info, true),
+		   info->full ? "" : "not ");
+	btrfs_info(fs_info,
+		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
+		info->total_bytes, info->bytes_used, info->bytes_pinned,
+		info->bytes_reserved, info->bytes_may_use,
+		info->bytes_readonly);
+	spin_unlock(&info->lock);
+
+	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
+	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
+	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
+	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
+	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
+
+	if (!dump_block_groups)
+		return;
+
+	down_read(&info->groups_sem);
+again:
+	list_for_each_entry(cache, &info->block_groups[index], list) {
+		spin_lock(&cache->lock);
+		btrfs_info(fs_info,
+			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
+			cache->key.objectid, cache->key.offset,
+			btrfs_block_group_used(&cache->item), cache->pinned,
+			cache->reserved, cache->ro ? "[readonly]" : "");
+		btrfs_dump_free_space(cache, bytes);
+		spin_unlock(&cache->lock);
+	}
+	if (++index < BTRFS_NR_RAID_TYPES)
+		goto again;
+	up_read(&info->groups_sem);
+}
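The DUMP_BLOCK_RSV() macro moved above leans on two standard C idioms: wrapping the body in do { ... } while (0) so a multi-statement macro stays safe after an un-braced if, and using #rsv_name stringification so the reserve's field name is printed literally. A self-contained userspace sketch of the same construction (a pthread mutex stands in for the kernel spinlock, and the demo_* names are invented for illustration):

#include <pthread.h>
#include <stdio.h>

struct demo_block_rsv {
	pthread_mutex_t lock;
	unsigned long long size;
	unsigned long long reserved;
};

struct demo_fs_info {
	struct demo_block_rsv global_block_rsv;
	struct demo_block_rsv trans_block_rsv;
};

/* Same shape as the kernel macro: lock, print "<field>: size ... reserved ...",
 * unlock; the do { } while (0) wrapper makes it behave as one statement. */
#define DEMO_DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct demo_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	pthread_mutex_lock(&__rsv->lock);				\
	printf(#rsv_name ": size %llu reserved %llu\n",			\
	       __rsv->size, __rsv->reserved);				\
	pthread_mutex_unlock(&__rsv->lock);				\
} while (0)

int main(void)
{
	struct demo_fs_info fs = {
		.global_block_rsv = { PTHREAD_MUTEX_INITIALIZER, 4096, 1024 },
		.trans_block_rsv  = { PTHREAD_MUTEX_INITIALIZER, 8192,  512 },
	};

	/* Expands to a locked read plus a printf of "global_block_rsv: ..." */
	DEMO_DUMP_BLOCK_RSV(&fs, global_block_rsv);
	DEMO_DUMP_BLOCK_RSV(&fs, trans_block_rsv);
	return 0;
}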
fs/btrfs/space-info.h

@@ -126,5 +126,8 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
 			 struct btrfs_space_info *space_info, u64 bytes,
 			 enum btrfs_reserve_flush_enum flush,
 			 bool system_chunk);
+void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
+			   struct btrfs_space_info *info, u64 bytes,
+			   int dump_block_groups);
 
 #endif /* BTRFS_SPACE_INFO_H */