mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull more btrfs updates from Chris Mason:
 "Btrfs round two. These are mostly a continuation of Dave Sterba's
  collection of cleanups, but Filipe also has some bug fixes and
  performance improvements"

* 'for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (69 commits)
  btrfs: add dummy callback for readpage_io_failed and drop checks
  btrfs: drop checks for mandatory extent_io_ops callbacks
  btrfs: document existence of extent_io ops callbacks
  btrfs: let writepage_end_io_hook return void
  btrfs: do proper error handling in btrfs_insert_xattr_item
  btrfs: handle allocation error in update_dev_stat_item
  btrfs: remove BUG_ON from __tree_mod_log_insert
  btrfs: derive maximum output size in the compression implementation
  btrfs: use predefined limits for calculating maximum number of pages for compression
  btrfs: export compression buffer limits in a header
  btrfs: merge nr_pages input and output parameter in compress_pages
  btrfs: merge length input and output parameter in compress_pages
  btrfs: constify name of subvolume in creation helpers
  btrfs: constify buffers used by compression helpers
  btrfs: constify input buffer of btrfs_csum_data
  btrfs: constify device path passed to relevant helpers
  btrfs: make btrfs_inode_resume_unlocked_dio take btrfs_inode
  btrfs: make btrfs_inode_block_unlocked_dio take btrfs_inode
  btrfs: Make btrfs_add_nondir take btrfs_inode
  btrfs: Make btrfs_add_link take btrfs_inode
  ...
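Two of the items above ("merge nr_pages ..." and "merge length ...") change the btrfs_compress_pages() calling convention, visible in the compression.h hunk further down: the page count and the output length each become a single in/out pointer instead of separate input and output parameters. A minimal caller sketch against the new prototype; the variable names and the zlib compression type are illustrative, not taken from a specific call site in this series:

    /* Illustrative fragment (btrfs context assumed, names made up). */
    unsigned long nr_pages = DIV_ROUND_UP(len, PAGE_SIZE); /* in: page cap, out: pages actually allocated */
    unsigned long total_in;                                /* out: bytes consumed from the input */
    unsigned long total_out = len;                         /* in: input length, out: compressed size */
    int ret;

    ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, mapping, start, pages,
                               &nr_pages, &total_in, &total_out);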
commit bbe08c0a43
@@ -237,20 +237,20 @@ static inline u64 btrfs_ino(struct btrfs_inode *inode)
     return ino;
 }
 
-static inline void btrfs_i_size_write(struct inode *inode, u64 size)
+static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
 {
-    i_size_write(inode, size);
-    BTRFS_I(inode)->disk_i_size = size;
+    i_size_write(&inode->vfs_inode, size);
+    inode->disk_i_size = size;
 }
 
-static inline bool btrfs_is_free_space_inode(struct inode *inode)
+static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
 {
-    struct btrfs_root *root = BTRFS_I(inode)->root;
+    struct btrfs_root *root = inode->root;
 
     if (root == root->fs_info->tree_root &&
-        btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID)
+        btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
         return true;
-    if (BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+    if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
         return true;
     return false;
 }
@@ -311,34 +311,33 @@ struct btrfs_dio_private {
  * to grab i_mutex. It is used to avoid the endless truncate due to
  * nonlocked dio read.
  */
-static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
+static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
 {
-    set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
+    set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
     smp_mb();
 }
 
-static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
+static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
 {
     smp_mb__before_atomic();
-    clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
-              &BTRFS_I(inode)->runtime_flags);
+    clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
 }
 
-static inline void btrfs_print_data_csum_error(struct inode *inode,
+static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
         u64 logical_start, u32 csum, u32 csum_expected, int mirror_num)
 {
-    struct btrfs_root *root = BTRFS_I(inode)->root;
+    struct btrfs_root *root = inode->root;
 
     /* Output minus objectid, which is more meaningful */
     if (root->objectid >= BTRFS_LAST_FREE_OBJECTID)
         btrfs_warn_rl(root->fs_info,
     "csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d",
-            root->objectid, btrfs_ino(BTRFS_I(inode)),
+            root->objectid, btrfs_ino(inode),
             logical_start, csum, csum_expected, mirror_num);
     else
         btrfs_warn_rl(root->fs_info,
     "csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d",
-            root->objectid, btrfs_ino(BTRFS_I(inode)),
+            root->objectid, btrfs_ino(inode),
             logical_start, csum, csum_expected, mirror_num);
 }

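Most hunks in this series follow the same mechanical pattern as the helpers above: functions now take struct btrfs_inode * directly, callers that only hold a VFS struct inode * convert once with BTRFS_I(), and the helpers reach VFS fields through the embedded vfs_inode. A before/after sketch of a call site (generic names, not a specific caller from the diff):

    /* before: the helper accepted the VFS inode and converted internally */
    btrfs_i_size_write(inode, size);

    /* after: the caller converts once ... */
    btrfs_i_size_write(BTRFS_I(inode), size);

    /* ... and the helper uses btrfs_inode members, going back through
     * &inode->vfs_inode only when it needs the VFS view, as in the hunk above. */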
@ -100,7 +100,7 @@ static struct bio *compressed_bio_alloc(struct block_device *bdev,
|
|||
return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
|
||||
}
|
||||
|
||||
static int check_compressed_csum(struct inode *inode,
|
||||
static int check_compressed_csum(struct btrfs_inode *inode,
|
||||
struct compressed_bio *cb,
|
||||
u64 disk_start)
|
||||
{
|
||||
|
@ -111,7 +111,7 @@ static int check_compressed_csum(struct inode *inode,
|
|||
u32 csum;
|
||||
u32 *cb_sum = &cb->sums;
|
||||
|
||||
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
|
||||
if (inode->flags & BTRFS_INODE_NODATASUM)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < cb->nr_pages; i++) {
|
||||
|
@ -125,7 +125,7 @@ static int check_compressed_csum(struct inode *inode,
|
|||
|
||||
if (csum != *cb_sum) {
|
||||
btrfs_print_data_csum_error(inode, disk_start, csum,
|
||||
*cb_sum, cb->mirror_num);
|
||||
*cb_sum, cb->mirror_num);
|
||||
ret = -EIO;
|
||||
goto fail;
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ static void end_compressed_bio_read(struct bio *bio)
|
|||
goto out;
|
||||
|
||||
inode = cb->inode;
|
||||
ret = check_compressed_csum(inode, cb,
|
||||
ret = check_compressed_csum(BTRFS_I(inode), cb,
|
||||
(u64)bio->bi_iter.bi_sector << 9);
|
||||
if (ret)
|
||||
goto csum_failed;
|
||||
|
@ -911,32 +911,28 @@ static void free_workspaces(void)
|
|||
}
|
||||
|
||||
/*
|
||||
* given an address space and start/len, compress the bytes.
|
||||
* Given an address space and start and length, compress the bytes into @pages
|
||||
* that are allocated on demand.
|
||||
*
|
||||
* pages are allocated to hold the compressed result and stored
|
||||
* in 'pages'
|
||||
* @out_pages is an in/out parameter, holds maximum number of pages to allocate
|
||||
* and returns number of actually allocated pages
|
||||
*
|
||||
* out_pages is used to return the number of pages allocated. There
|
||||
* may be pages allocated even if we return an error
|
||||
*
|
||||
* total_in is used to return the number of bytes actually read. It
|
||||
* may be smaller then len if we had to exit early because we
|
||||
* @total_in is used to return the number of bytes actually read. It
|
||||
* may be smaller than the input length if we had to exit early because we
|
||||
* ran out of room in the pages array or because we cross the
|
||||
* max_out threshold.
|
||||
*
|
||||
* total_out is used to return the total number of compressed bytes
|
||||
* @total_out is an in/out parameter, must be set to the input length and will
|
||||
* be also used to return the total number of compressed bytes
|
||||
*
|
||||
* max_out tells us the max number of bytes that we're allowed to
|
||||
* @max_out tells us the max number of bytes that we're allowed to
|
||||
* stuff into pages
|
||||
*/
|
||||
int btrfs_compress_pages(int type, struct address_space *mapping,
|
||||
u64 start, unsigned long len,
|
||||
struct page **pages,
|
||||
unsigned long nr_dest_pages,
|
||||
u64 start, struct page **pages,
|
||||
unsigned long *out_pages,
|
||||
unsigned long *total_in,
|
||||
unsigned long *total_out,
|
||||
unsigned long max_out)
|
||||
unsigned long *total_out)
|
||||
{
|
||||
struct list_head *workspace;
|
||||
int ret;
|
||||
|
@ -944,10 +940,9 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
|
|||
workspace = find_workspace(type);
|
||||
|
||||
ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
|
||||
start, len, pages,
|
||||
nr_dest_pages, out_pages,
|
||||
total_in, total_out,
|
||||
max_out);
|
||||
start, pages,
|
||||
out_pages,
|
||||
total_in, total_out);
|
||||
free_workspace(type, workspace);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1015,7 +1010,7 @@ void btrfs_exit_compress(void)
|
|||
*
|
||||
* total_out is the last byte of the buffer
|
||||
*/
|
||||
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
|
||||
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
|
||||
unsigned long total_out, u64 disk_start,
|
||||
struct bio *bio)
|
||||
{
|
||||
|
|
|
@@ -19,20 +19,32 @@
 #ifndef __BTRFS_COMPRESSION_
 #define __BTRFS_COMPRESSION_
 
+/*
+ * We want to make sure that amount of RAM required to uncompress an extent is
+ * reasonable, so we limit the total size in ram of a compressed extent to
+ * 128k. This is a crucial number because it also controls how easily we can
+ * spread reads across cpus for decompression.
+ *
+ * We also want to make sure the amount of IO required to do a random read is
+ * reasonably small, so we limit the size of a compressed extent to 128k.
+ */
+
+/* Maximum length of compressed data stored on disk */
+#define BTRFS_MAX_COMPRESSED    (SZ_128K)
+/* Maximum size of data before compression */
+#define BTRFS_MAX_UNCOMPRESSED  (SZ_128K)
+
 void btrfs_init_compress(void);
 void btrfs_exit_compress(void);
 
 int btrfs_compress_pages(int type, struct address_space *mapping,
-             u64 start, unsigned long len,
-             struct page **pages,
-             unsigned long nr_dest_pages,
+             u64 start, struct page **pages,
              unsigned long *out_pages,
              unsigned long *total_in,
-             unsigned long *total_out,
-             unsigned long max_out);
+             unsigned long *total_out);
 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
              unsigned long start_byte, size_t srclen, size_t destlen);
-int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
+int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
              unsigned long total_out, u64 disk_start,
              struct bio *bio);
 
@@ -59,13 +71,11 @@ struct btrfs_compress_op {
 
     int (*compress_pages)(struct list_head *workspace,
                   struct address_space *mapping,
-                  u64 start, unsigned long len,
+                  u64 start,
                   struct page **pages,
-                  unsigned long nr_dest_pages,
                   unsigned long *out_pages,
                   unsigned long *total_in,
-                  unsigned long *total_out,
-                  unsigned long max_out);
+                  unsigned long *total_out);
 
     int (*decompress_bio)(struct list_head *workspace,
                   struct page **pages_in,

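Because both limits above are 128K, the number of pages one compressed extent can occupy is bounded at compile time. A sketch of how a caller could cap its destination page array with the exported constant; the surrounding variables (start, end, nr_pages) are assumed, and the exact in-tree call sites are not part of this excerpt:

    /* Illustrative fragment: bound the destination page array. */
    unsigned long max_pages = BTRFS_MAX_COMPRESSED / PAGE_SIZE;

    nr_pages = min_t(unsigned long,
                     DIV_ROUND_UP(end - start + 1, PAGE_SIZE), max_pages);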
@@ -453,8 +453,6 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
     struct rb_node *parent = NULL;
     struct tree_mod_elem *cur;
 
-    BUG_ON(!tm);
-
     tm->seq = btrfs_inc_tree_mod_seq(fs_info);
 
     tm_root = &fs_info->tree_mod_log;
@@ -4159,6 +4157,9 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
 
     /* try to push all the items before our slot into the next leaf */
     slot = path->slots[0];
+    space_needed = data_size;
+    if (slot > 0)
+        space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
     ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
     if (ret < 0)
         return ret;
@@ -4214,6 +4215,10 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
         if (wret < 0)
             return wret;
         if (wret) {
+            space_needed = data_size;
+            if (slot > 0)
+                space_needed -= btrfs_leaf_free_space(fs_info,
+                                                      l);
             wret = push_leaf_left(trans, root, path, space_needed,
                           space_needed, 0, (u32)-1);
             if (wret < 0)

@ -2687,7 +2687,7 @@ enum btrfs_flush_state {
|
|||
};
|
||||
|
||||
int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
|
||||
int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
|
||||
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
|
||||
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
|
||||
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
|
||||
u64 len);
|
||||
|
@ -2695,16 +2695,16 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
|
|||
struct btrfs_fs_info *fs_info);
|
||||
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
|
||||
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode);
|
||||
void btrfs_orphan_release_metadata(struct inode *inode);
|
||||
struct btrfs_inode *inode);
|
||||
void btrfs_orphan_release_metadata(struct btrfs_inode *inode);
|
||||
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
|
||||
struct btrfs_block_rsv *rsv,
|
||||
int nitems,
|
||||
u64 *qgroup_reserved, bool use_global_rsv);
|
||||
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
|
||||
struct btrfs_block_rsv *rsv);
|
||||
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
|
||||
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
|
||||
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
|
||||
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes);
|
||||
int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
|
||||
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
|
||||
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
|
||||
|
@ -2982,7 +2982,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
|
|||
const char *name, int name_len);
|
||||
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root, const char *name,
|
||||
int name_len, struct inode *dir,
|
||||
int name_len, struct btrfs_inode *dir,
|
||||
struct btrfs_key *location, u8 type, u64 index);
|
||||
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
|
@ -3081,7 +3081,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
|
|||
u64 file_start, int contig);
|
||||
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
|
||||
struct list_head *list, int search_commit);
|
||||
void btrfs_extent_item_to_extent_map(struct inode *inode,
|
||||
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
|
||||
const struct btrfs_path *path,
|
||||
struct btrfs_file_extent_item *fi,
|
||||
const bool new_inline,
|
||||
|
@ -3100,9 +3100,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
|
|||
int delay_iput);
|
||||
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
|
||||
|
||||
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
|
||||
size_t pg_offset, u64 start, u64 len,
|
||||
int create);
|
||||
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
|
||||
struct page *page, size_t pg_offset, u64 start,
|
||||
u64 len, int create);
|
||||
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
|
||||
u64 *orig_start, u64 *orig_block_len,
|
||||
u64 *ram_bytes);
|
||||
|
@ -3123,13 +3123,13 @@ static inline void btrfs_force_ra(struct address_space *mapping,
|
|||
}
|
||||
|
||||
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
|
||||
int btrfs_set_inode_index(struct inode *dir, u64 *index);
|
||||
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
|
||||
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
struct btrfs_inode *dir, struct btrfs_inode *inode,
|
||||
const char *name, int name_len);
|
||||
int btrfs_add_link(struct btrfs_trans_handle *trans,
|
||||
struct inode *parent_inode, struct inode *inode,
|
||||
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
|
||||
const char *name, int name_len, int add_backref, u64 index);
|
||||
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
|
@ -3166,15 +3166,16 @@ void btrfs_destroy_cachep(void);
|
|||
long btrfs_ioctl_trans_end(struct file *file);
|
||||
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
|
||||
struct btrfs_root *root, int *was_new);
|
||||
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
|
||||
size_t pg_offset, u64 start, u64 end,
|
||||
int create);
|
||||
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
|
||||
struct page *page, size_t pg_offset,
|
||||
u64 start, u64 end, int create);
|
||||
int btrfs_update_inode(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root,
|
||||
struct inode *inode);
|
||||
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root, struct inode *inode);
|
||||
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
|
||||
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_inode *inode);
|
||||
int btrfs_orphan_cleanup(struct btrfs_root *root);
|
||||
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
|
||||
struct btrfs_root *root);
|
||||
|
@ -3215,11 +3216,11 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
|
|||
int btrfs_auto_defrag_init(void);
|
||||
void btrfs_auto_defrag_exit(void);
|
||||
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode);
|
||||
struct btrfs_inode *inode);
|
||||
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
|
||||
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
|
||||
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
|
||||
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
|
||||
void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
|
||||
int skip_pinned);
|
||||
extern const struct file_operations btrfs_file_operations;
|
||||
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
|
||||
|
@ -3233,7 +3234,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
|
|||
struct btrfs_root *root, struct inode *inode, u64 start,
|
||||
u64 end, int drop_cache);
|
||||
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode, u64 start, u64 end);
|
||||
struct btrfs_inode *inode, u64 start, u64 end);
|
||||
int btrfs_release_file(struct inode *inode, struct file *file);
|
||||
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
|
||||
size_t num_pages, loff_t pos, size_t write_bytes,
|
||||
|
|
|
@@ -1790,7 +1790,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 
     i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
     i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
-    btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
+    btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
     inode->i_mode = btrfs_stack_inode_mode(inode_item);
     set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
     inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));

@@ -304,8 +304,9 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
         dev_replace->cursor_left_last_write_of_item;
 }
 
-int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
-                u64 srcdevid, char *srcdev_name, int read_src)
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
+        const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
+        int read_src)
 {
     struct btrfs_root *root = fs_info->dev_root;
     struct btrfs_trans_handle *trans;

@@ -27,8 +27,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
 void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
 int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
                struct btrfs_ioctl_dev_replace_args *args);
-int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
-               u64 srcdevid, char *srcdev_name, int read_src);
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
+        const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
+        int read_src);
 void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
                struct btrfs_ioctl_dev_replace_args *args);
 int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info,

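Constifying tgtdev_name and srcdev_name documents that btrfs_dev_replace_start() only reads the device path strings, so callers may hand it read-only buffers. An illustrative call; the device paths are made-up examples, not values from this series:

    /* Example only: device names are hypothetical. */
    ret = btrfs_dev_replace_start(fs_info, "/dev/sdb", srcdevid,
                                  "/dev/sda", read_src);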
@@ -80,7 +80,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
     struct extent_buffer *leaf;
     u32 data_size;
 
-    BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info));
+    if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info))
+        return -ENOSPC;
 
     key.objectid = objectid;
     key.type = BTRFS_XATTR_ITEM_KEY;
@@ -120,7 +121,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
  */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
               *root, const char *name, int name_len,
-              struct inode *dir, struct btrfs_key *location,
+              struct btrfs_inode *dir, struct btrfs_key *location,
               u8 type, u64 index)
 {
     int ret = 0;
@@ -133,7 +134,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
     struct btrfs_disk_key disk_key;
     u32 data_size;
 
-    key.objectid = btrfs_ino(BTRFS_I(dir));
+    key.objectid = btrfs_ino(dir);
     key.type = BTRFS_DIR_ITEM_KEY;
     key.offset = btrfs_name_hash(name, name_len);
 
@@ -174,7 +175,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
     btrfs_release_path(path);
 
     ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
-            name_len, BTRFS_I(dir), &disk_key, type, index);
+            name_len, dir, &disk_key, type, index);
 out_free:
     btrfs_free_path(path);
     if (ret)

@ -219,12 +219,12 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
|
|||
* extents on the btree inode are pretty simple, there's one extent
|
||||
* that covers the entire device
|
||||
*/
|
||||
static struct extent_map *btree_get_extent(struct inode *inode,
|
||||
static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
|
||||
struct page *page, size_t pg_offset, u64 start, u64 len,
|
||||
int create)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
struct extent_map_tree *em_tree = &inode->extent_tree;
|
||||
struct extent_map *em;
|
||||
int ret;
|
||||
|
||||
|
@ -265,7 +265,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
|
|||
return em;
|
||||
}
|
||||
|
||||
u32 btrfs_csum_data(char *data, u32 seed, size_t len)
|
||||
u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
|
||||
{
|
||||
return btrfs_crc32c(seed, data, len);
|
||||
}
|
||||
|
@ -2205,11 +2205,9 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
|
|||
btrfs_destroy_workqueue(fs_info->delalloc_workers);
|
||||
btrfs_destroy_workqueue(fs_info->workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_meta_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_repair_workers);
|
||||
btrfs_destroy_workqueue(fs_info->rmw_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_write_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
|
||||
btrfs_destroy_workqueue(fs_info->submit_workers);
|
||||
|
@ -2219,6 +2217,13 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
|
|||
btrfs_destroy_workqueue(fs_info->flush_workers);
|
||||
btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
|
||||
btrfs_destroy_workqueue(fs_info->extent_workers);
|
||||
/*
|
||||
* Now that all other work queues are destroyed, we can safely destroy
|
||||
* the queues used for metadata I/O, since tasks from those other work
|
||||
* queues can do metadata I/O operations.
|
||||
*/
|
||||
btrfs_destroy_workqueue(fs_info->endio_meta_workers);
|
||||
btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
|
||||
}
|
||||
|
||||
static void free_root_extent_buffers(struct btrfs_root *root)
|
||||
|
@ -3261,7 +3266,6 @@ int open_ctree(struct super_block *sb,
|
|||
|
||||
fail_block_groups:
|
||||
btrfs_put_block_group_cache(fs_info);
|
||||
btrfs_free_block_groups(fs_info);
|
||||
|
||||
fail_tree_roots:
|
||||
free_root_pointers(fs_info, 1);
|
||||
|
@ -3269,6 +3273,7 @@ int open_ctree(struct super_block *sb,
|
|||
|
||||
fail_sb_buffer:
|
||||
btrfs_stop_all_workers(fs_info);
|
||||
btrfs_free_block_groups(fs_info);
|
||||
fail_alloc:
|
||||
fail_iput:
|
||||
btrfs_mapping_tree_free(&fs_info->mapping_tree);
|
||||
|
@ -3448,7 +3453,7 @@ static int write_dev_supers(struct btrfs_device *device,
|
|||
btrfs_set_super_bytenr(sb, bytenr);
|
||||
|
||||
crc = ~(u32)0;
|
||||
crc = btrfs_csum_data((char *)sb +
|
||||
crc = btrfs_csum_data((const char *)sb +
|
||||
BTRFS_CSUM_SIZE, crc,
|
||||
BTRFS_SUPER_INFO_SIZE -
|
||||
BTRFS_CSUM_SIZE);
|
||||
|
@ -3977,8 +3982,6 @@ void close_ctree(struct btrfs_fs_info *fs_info)
|
|||
|
||||
btrfs_put_block_group_cache(fs_info);
|
||||
|
||||
btrfs_free_block_groups(fs_info);
|
||||
|
||||
/*
|
||||
* we must make sure there is not any read request to
|
||||
* submit after we stopping all workers.
|
||||
|
@ -3986,6 +3989,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
|
|||
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
|
||||
btrfs_stop_all_workers(fs_info);
|
||||
|
||||
btrfs_free_block_groups(fs_info);
|
||||
|
||||
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
|
||||
free_root_pointers(fs_info, 1);
|
||||
|
||||
|
@ -4653,9 +4658,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
|
|||
}
|
||||
|
||||
static const struct extent_io_ops btree_extent_io_ops = {
|
||||
.readpage_end_io_hook = btree_readpage_end_io_hook,
|
||||
.readpage_io_failed_hook = btree_io_failed_hook,
|
||||
/* mandatory callbacks */
|
||||
.submit_bio_hook = btree_submit_bio_hook,
|
||||
.readpage_end_io_hook = btree_readpage_end_io_hook,
|
||||
/* note we're sharing with inode.c for the merge bio hook */
|
||||
.merge_bio_hook = btrfs_merge_bio_hook,
|
||||
.readpage_io_failed_hook = btree_io_failed_hook,
|
||||
|
||||
/* optional callbacks */
|
||||
};
|
||||
|
|
|
@@ -116,7 +116,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
               int atomic);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
-u32 btrfs_csum_data(char *data, u32 seed, size_t len);
+u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, u8 *result);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
             enum btrfs_wq_endio_type metadata);

@ -4135,10 +4135,10 @@ static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
|
|||
(may_use_included ? s_info->bytes_may_use : 0);
|
||||
}
|
||||
|
||||
int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
|
||||
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
|
||||
{
|
||||
struct btrfs_space_info *data_sinfo;
|
||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||
struct btrfs_root *root = inode->root;
|
||||
struct btrfs_fs_info *fs_info = root->fs_info;
|
||||
u64 used;
|
||||
int ret = 0;
|
||||
|
@ -4281,7 +4281,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
|
|||
round_down(start, fs_info->sectorsize);
|
||||
start = round_down(start, fs_info->sectorsize);
|
||||
|
||||
ret = btrfs_alloc_data_chunk_ondemand(inode, len);
|
||||
ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -5742,10 +5742,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
|
|||
|
||||
/* Can only return 0 or -ENOSPC */
|
||||
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode)
|
||||
struct btrfs_inode *inode)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
struct btrfs_root *root = inode->root;
|
||||
/*
|
||||
* We always use trans->block_rsv here as we will have reserved space
|
||||
* for our orphan when starting the transaction, using get_block_rsv()
|
||||
|
@ -5762,19 +5762,19 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
|
|||
*/
|
||||
u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
|
||||
|
||||
trace_btrfs_space_reservation(fs_info, "orphan",
|
||||
btrfs_ino(BTRFS_I(inode)), num_bytes, 1);
|
||||
trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
|
||||
num_bytes, 1);
|
||||
return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
|
||||
}
|
||||
|
||||
void btrfs_orphan_release_metadata(struct inode *inode)
|
||||
void btrfs_orphan_release_metadata(struct btrfs_inode *inode)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
struct btrfs_root *root = inode->root;
|
||||
u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
|
||||
|
||||
trace_btrfs_space_reservation(fs_info, "orphan",
|
||||
btrfs_ino(BTRFS_I(inode)), num_bytes, 0);
|
||||
trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
|
||||
num_bytes, 0);
|
||||
btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
|
||||
}
|
||||
|
||||
|
@ -5846,7 +5846,8 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
|
|||
* reserved extents that need to be freed. This must be called with
|
||||
* BTRFS_I(inode)->lock held.
|
||||
*/
|
||||
static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
|
||||
static unsigned drop_outstanding_extent(struct btrfs_inode *inode,
|
||||
u64 num_bytes)
|
||||
{
|
||||
unsigned drop_inode_space = 0;
|
||||
unsigned dropped_extents = 0;
|
||||
|
@ -5854,25 +5855,23 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
|
|||
|
||||
num_extents = count_max_extents(num_bytes);
|
||||
ASSERT(num_extents);
|
||||
ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
|
||||
BTRFS_I(inode)->outstanding_extents -= num_extents;
|
||||
ASSERT(inode->outstanding_extents >= num_extents);
|
||||
inode->outstanding_extents -= num_extents;
|
||||
|
||||
if (BTRFS_I(inode)->outstanding_extents == 0 &&
|
||||
if (inode->outstanding_extents == 0 &&
|
||||
test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
|
||||
&BTRFS_I(inode)->runtime_flags))
|
||||
&inode->runtime_flags))
|
||||
drop_inode_space = 1;
|
||||
|
||||
/*
|
||||
* If we have more or the same amount of outstanding extents than we have
|
||||
* reserved then we need to leave the reserved extents count alone.
|
||||
*/
|
||||
if (BTRFS_I(inode)->outstanding_extents >=
|
||||
BTRFS_I(inode)->reserved_extents)
|
||||
if (inode->outstanding_extents >= inode->reserved_extents)
|
||||
return drop_inode_space;
|
||||
|
||||
dropped_extents = BTRFS_I(inode)->reserved_extents -
|
||||
BTRFS_I(inode)->outstanding_extents;
|
||||
BTRFS_I(inode)->reserved_extents -= dropped_extents;
|
||||
dropped_extents = inode->reserved_extents - inode->outstanding_extents;
|
||||
inode->reserved_extents -= dropped_extents;
|
||||
return dropped_extents + drop_inode_space;
|
||||
}
|
||||
|
||||
|
@ -5894,24 +5893,21 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
|
|||
*
|
||||
* This must be called with BTRFS_I(inode)->lock held.
|
||||
*/
|
||||
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
|
||||
static u64 calc_csum_metadata_size(struct btrfs_inode *inode, u64 num_bytes,
|
||||
int reserve)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
u64 old_csums, num_csums;
|
||||
|
||||
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
|
||||
BTRFS_I(inode)->csum_bytes == 0)
|
||||
if (inode->flags & BTRFS_INODE_NODATASUM && inode->csum_bytes == 0)
|
||||
return 0;
|
||||
|
||||
old_csums = btrfs_csum_bytes_to_leaves(fs_info,
|
||||
BTRFS_I(inode)->csum_bytes);
|
||||
old_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
|
||||
if (reserve)
|
||||
BTRFS_I(inode)->csum_bytes += num_bytes;
|
||||
inode->csum_bytes += num_bytes;
|
||||
else
|
||||
BTRFS_I(inode)->csum_bytes -= num_bytes;
|
||||
num_csums = btrfs_csum_bytes_to_leaves(fs_info,
|
||||
BTRFS_I(inode)->csum_bytes);
|
||||
inode->csum_bytes -= num_bytes;
|
||||
num_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
|
||||
|
||||
/* No change, no need to reserve more */
|
||||
if (old_csums == num_csums)
|
||||
|
@ -5924,10 +5920,10 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
|
|||
return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
|
||||
}
|
||||
|
||||
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
||||
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_root *root = BTRFS_I(inode)->root;
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
struct btrfs_root *root = inode->root;
|
||||
struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
|
||||
u64 to_reserve = 0;
|
||||
u64 csum_bytes;
|
||||
|
@ -5959,25 +5955,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
|||
schedule_timeout(1);
|
||||
|
||||
if (delalloc_lock)
|
||||
mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
|
||||
mutex_lock(&inode->delalloc_mutex);
|
||||
|
||||
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
|
||||
|
||||
spin_lock(&BTRFS_I(inode)->lock);
|
||||
spin_lock(&inode->lock);
|
||||
nr_extents = count_max_extents(num_bytes);
|
||||
BTRFS_I(inode)->outstanding_extents += nr_extents;
|
||||
inode->outstanding_extents += nr_extents;
|
||||
|
||||
nr_extents = 0;
|
||||
if (BTRFS_I(inode)->outstanding_extents >
|
||||
BTRFS_I(inode)->reserved_extents)
|
||||
nr_extents += BTRFS_I(inode)->outstanding_extents -
|
||||
BTRFS_I(inode)->reserved_extents;
|
||||
if (inode->outstanding_extents > inode->reserved_extents)
|
||||
nr_extents += inode->outstanding_extents -
|
||||
inode->reserved_extents;
|
||||
|
||||
/* We always want to reserve a slot for updating the inode. */
|
||||
to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
|
||||
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
|
||||
csum_bytes = BTRFS_I(inode)->csum_bytes;
|
||||
spin_unlock(&BTRFS_I(inode)->lock);
|
||||
csum_bytes = inode->csum_bytes;
|
||||
spin_unlock(&inode->lock);
|
||||
|
||||
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
|
||||
ret = btrfs_qgroup_reserve_meta(root,
|
||||
|
@ -5993,38 +5988,38 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
|||
goto out_fail;
|
||||
}
|
||||
|
||||
spin_lock(&BTRFS_I(inode)->lock);
|
||||
spin_lock(&inode->lock);
|
||||
if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
|
||||
&BTRFS_I(inode)->runtime_flags)) {
|
||||
&inode->runtime_flags)) {
|
||||
to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
|
||||
release_extra = true;
|
||||
}
|
||||
BTRFS_I(inode)->reserved_extents += nr_extents;
|
||||
spin_unlock(&BTRFS_I(inode)->lock);
|
||||
inode->reserved_extents += nr_extents;
|
||||
spin_unlock(&inode->lock);
|
||||
|
||||
if (delalloc_lock)
|
||||
mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
|
||||
mutex_unlock(&inode->delalloc_mutex);
|
||||
|
||||
if (to_reserve)
|
||||
trace_btrfs_space_reservation(fs_info, "delalloc",
|
||||
btrfs_ino(BTRFS_I(inode)), to_reserve, 1);
|
||||
btrfs_ino(inode), to_reserve, 1);
|
||||
if (release_extra)
|
||||
btrfs_block_rsv_release(fs_info, block_rsv,
|
||||
btrfs_calc_trans_metadata_size(fs_info, 1));
|
||||
return 0;
|
||||
|
||||
out_fail:
|
||||
spin_lock(&BTRFS_I(inode)->lock);
|
||||
spin_lock(&inode->lock);
|
||||
dropped = drop_outstanding_extent(inode, num_bytes);
|
||||
/*
|
||||
* If the inodes csum_bytes is the same as the original
|
||||
* csum_bytes then we know we haven't raced with any free()ers
|
||||
* so we can just reduce our inodes csum bytes and carry on.
|
||||
*/
|
||||
if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
|
||||
if (inode->csum_bytes == csum_bytes) {
|
||||
calc_csum_metadata_size(inode, num_bytes, 0);
|
||||
} else {
|
||||
u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
|
||||
u64 orig_csum_bytes = inode->csum_bytes;
|
||||
u64 bytes;
|
||||
|
||||
/*
|
||||
|
@ -6035,8 +6030,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
|||
* number of bytes that were freed while we were trying our
|
||||
* reservation.
|
||||
*/
|
||||
bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
|
||||
BTRFS_I(inode)->csum_bytes = csum_bytes;
|
||||
bytes = csum_bytes - inode->csum_bytes;
|
||||
inode->csum_bytes = csum_bytes;
|
||||
to_free = calc_csum_metadata_size(inode, bytes, 0);
|
||||
|
||||
|
||||
|
@ -6045,7 +6040,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
|||
* been making this reservation and our ->csum_bytes were not
|
||||
* artificially inflated.
|
||||
*/
|
||||
BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
|
||||
inode->csum_bytes = csum_bytes - num_bytes;
|
||||
bytes = csum_bytes - orig_csum_bytes;
|
||||
bytes = calc_csum_metadata_size(inode, bytes, 0);
|
||||
|
||||
|
@ -6057,23 +6052,23 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
|||
* need to do anything, the other free-ers did the correct
|
||||
* thing.
|
||||
*/
|
||||
BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
|
||||
inode->csum_bytes = orig_csum_bytes - num_bytes;
|
||||
if (bytes > to_free)
|
||||
to_free = bytes - to_free;
|
||||
else
|
||||
to_free = 0;
|
||||
}
|
||||
spin_unlock(&BTRFS_I(inode)->lock);
|
||||
spin_unlock(&inode->lock);
|
||||
if (dropped)
|
||||
to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
|
||||
|
||||
if (to_free) {
|
||||
btrfs_block_rsv_release(fs_info, block_rsv, to_free);
|
||||
trace_btrfs_space_reservation(fs_info, "delalloc",
|
||||
btrfs_ino(BTRFS_I(inode)), to_free, 0);
|
||||
btrfs_ino(inode), to_free, 0);
|
||||
}
|
||||
if (delalloc_lock)
|
||||
mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
|
||||
mutex_unlock(&inode->delalloc_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -6086,27 +6081,27 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
|||
* once we complete IO for a given set of bytes to release their metadata
|
||||
* reservations.
|
||||
*/
|
||||
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
|
||||
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
|
||||
u64 to_free = 0;
|
||||
unsigned dropped;
|
||||
|
||||
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
|
||||
spin_lock(&BTRFS_I(inode)->lock);
|
||||
spin_lock(&inode->lock);
|
||||
dropped = drop_outstanding_extent(inode, num_bytes);
|
||||
|
||||
if (num_bytes)
|
||||
to_free = calc_csum_metadata_size(inode, num_bytes, 0);
|
||||
spin_unlock(&BTRFS_I(inode)->lock);
|
||||
spin_unlock(&inode->lock);
|
||||
if (dropped > 0)
|
||||
to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
|
||||
|
||||
if (btrfs_is_testing(fs_info))
|
||||
return;
|
||||
|
||||
trace_btrfs_space_reservation(fs_info, "delalloc",
|
||||
btrfs_ino(BTRFS_I(inode)), to_free, 0);
|
||||
trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode),
|
||||
to_free, 0);
|
||||
|
||||
btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
|
||||
}
|
||||
|
@ -6141,7 +6136,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
|
|||
ret = btrfs_check_data_free_space(inode, start, len);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = btrfs_delalloc_reserve_metadata(inode, len);
|
||||
ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
|
||||
if (ret < 0)
|
||||
btrfs_free_reserved_data_space(inode, start, len);
|
||||
return ret;
|
||||
|
@ -6164,7 +6159,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
|
|||
*/
|
||||
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
|
||||
{
|
||||
btrfs_delalloc_release_metadata(inode, len);
|
||||
btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
|
||||
btrfs_free_reserved_data_space(inode, start, len);
|
||||
}
|
||||
|
||||
|
@ -9740,6 +9735,11 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Must be called only after stopping all workers, since we could have block
|
||||
* group caching kthreads running, and therefore they could race with us if we
|
||||
* freed the block groups before stopping them.
|
||||
*/
|
||||
int btrfs_free_block_groups(struct btrfs_fs_info *info)
|
||||
{
|
||||
struct btrfs_block_group_cache *block_group;
|
||||
|
@ -9779,9 +9779,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
|
|||
list_del(&block_group->list);
|
||||
up_write(&block_group->space_info->groups_sem);
|
||||
|
||||
if (block_group->cached == BTRFS_CACHE_STARTED)
|
||||
wait_block_group_cache_done(block_group);
|
||||
|
||||
/*
|
||||
* We haven't cached this block group, which means we could
|
||||
* possibly have excluded extents on this block group.
|
||||
|
@ -9791,6 +9788,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
|
|||
free_excluded_extents(info, block_group);
|
||||
|
||||
btrfs_remove_free_space_cache(block_group);
|
||||
ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
|
||||
ASSERT(list_empty(&block_group->dirty_list));
|
||||
ASSERT(list_empty(&block_group->io_list));
|
||||
ASSERT(list_empty(&block_group->bg_list));
|
||||
|
@ -10342,7 +10340,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
|
|||
mutex_unlock(&trans->transaction->cache_write_mutex);
|
||||
|
||||
if (!IS_ERR(inode)) {
|
||||
ret = btrfs_orphan_add(trans, inode);
|
||||
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
|
||||
if (ret) {
|
||||
btrfs_add_delayed_iput(inode);
|
||||
goto out;
|
||||
|
|
|
@ -428,7 +428,8 @@ static void clear_state_cb(struct extent_io_tree *tree,
|
|||
struct extent_state *state, unsigned *bits)
|
||||
{
|
||||
if (tree->ops && tree->ops->clear_bit_hook)
|
||||
tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
|
||||
tree->ops->clear_bit_hook(BTRFS_I(tree->mapping->host),
|
||||
state, bits);
|
||||
}
|
||||
|
||||
static void set_state_bits(struct extent_io_tree *tree,
|
||||
|
@ -1959,11 +1960,11 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
|
|||
SetPageUptodate(page);
|
||||
}
|
||||
|
||||
int free_io_failure(struct inode *inode, struct io_failure_record *rec)
|
||||
int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec)
|
||||
{
|
||||
int ret;
|
||||
int err = 0;
|
||||
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
|
||||
struct extent_io_tree *failure_tree = &inode->io_failure_tree;
|
||||
|
||||
set_state_failrec(failure_tree, rec->start, NULL);
|
||||
ret = clear_extent_bits(failure_tree, rec->start,
|
||||
|
@ -1972,7 +1973,7 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
|
|||
if (ret)
|
||||
err = ret;
|
||||
|
||||
ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
|
||||
ret = clear_extent_bits(&inode->io_tree, rec->start,
|
||||
rec->start + rec->len - 1,
|
||||
EXTENT_DAMAGED);
|
||||
if (ret && !err)
|
||||
|
@ -1992,10 +1993,11 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
|
|||
* currently, there can be no more than two copies of every data bit. thus,
|
||||
* exactly one rewrite is required.
|
||||
*/
|
||||
int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
|
||||
struct page *page, unsigned int pg_offset, int mirror_num)
|
||||
int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
|
||||
u64 logical, struct page *page,
|
||||
unsigned int pg_offset, int mirror_num)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
|
||||
struct btrfs_fs_info *fs_info = inode->root->fs_info;
|
||||
struct bio *bio;
|
||||
struct btrfs_device *dev;
|
||||
u64 map_length = 0;
|
||||
|
@ -2054,7 +2056,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
|
|||
|
||||
btrfs_info_rl_in_rcu(fs_info,
|
||||
"read error corrected: ino %llu off %llu (dev %s sector %llu)",
|
||||
btrfs_ino(BTRFS_I(inode)), start,
|
||||
btrfs_ino(inode), start,
|
||||
rcu_str_deref(dev->name), sector);
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
bio_put(bio);
|
||||
|
@ -2074,7 +2076,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
|
|||
for (i = 0; i < num_pages; i++) {
|
||||
struct page *p = eb->pages[i];
|
||||
|
||||
ret = repair_io_failure(fs_info->btree_inode, start,
|
||||
ret = repair_io_failure(BTRFS_I(fs_info->btree_inode), start,
|
||||
PAGE_SIZE, start, p,
|
||||
start - page_offset(p), mirror_num);
|
||||
if (ret)
|
||||
|
@ -2089,23 +2091,23 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
|
|||
* each time an IO finishes, we do a fast check in the IO failure tree
|
||||
* to see if we need to process or clean up an io_failure_record
|
||||
*/
|
||||
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
|
||||
int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page,
|
||||
unsigned int pg_offset)
|
||||
{
|
||||
u64 private;
|
||||
struct io_failure_record *failrec;
|
||||
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
|
||||
struct btrfs_fs_info *fs_info = inode->root->fs_info;
|
||||
struct extent_state *state;
|
||||
int num_copies;
|
||||
int ret;
|
||||
|
||||
private = 0;
|
||||
ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
|
||||
ret = count_range_bits(&inode->io_failure_tree, &private,
|
||||
(u64)-1, 1, EXTENT_DIRTY, 0);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start,
|
||||
ret = get_state_failrec(&inode->io_failure_tree, start,
|
||||
&failrec);
|
||||
if (ret)
|
||||
return 0;
|
||||
|
@ -2122,11 +2124,11 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page,
|
|||
if (fs_info->sb->s_flags & MS_RDONLY)
|
||||
goto out;
|
||||
|
||||
spin_lock(&BTRFS_I(inode)->io_tree.lock);
|
||||
state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
|
||||
spin_lock(&inode->io_tree.lock);
|
||||
state = find_first_extent_bit_state(&inode->io_tree,
|
||||
failrec->start,
|
||||
EXTENT_LOCKED);
|
||||
spin_unlock(&BTRFS_I(inode)->io_tree.lock);
|
||||
spin_unlock(&inode->io_tree.lock);
|
||||
|
||||
if (state && state->start <= failrec->start &&
|
||||
state->end >= failrec->start + failrec->len - 1) {
|
||||
|
@ -2151,9 +2153,9 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page,
|
|||
* - under ordered extent
|
||||
* - the inode is freeing
|
||||
*/
|
||||
void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
|
||||
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
|
||||
{
|
||||
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
|
||||
struct extent_io_tree *failure_tree = &inode->io_failure_tree;
|
||||
struct io_failure_record *failrec;
|
||||
struct extent_state *state, *next;
|
||||
|
||||
|
@ -2393,7 +2395,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
|
|||
|
||||
ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
|
||||
if (!ret) {
|
||||
free_io_failure(inode, failrec);
|
||||
free_io_failure(BTRFS_I(inode), failrec);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -2406,7 +2408,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
|
|||
(int)phy_offset, failed_bio->bi_end_io,
|
||||
NULL);
|
||||
if (!bio) {
|
||||
free_io_failure(inode, failrec);
|
||||
free_io_failure(BTRFS_I(inode), failrec);
|
||||
return -EIO;
|
||||
}
|
||||
bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
|
||||
|
@ -2418,7 +2420,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
|
|||
ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
|
||||
failrec->bio_flags, 0);
|
||||
if (ret) {
|
||||
free_io_failure(inode, failrec);
|
||||
free_io_failure(BTRFS_I(inode), failrec);
|
||||
bio_put(bio);
|
||||
}
|
||||
|
||||
|
@ -2435,12 +2437,9 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
|
|||
|
||||
tree = &BTRFS_I(page->mapping->host)->io_tree;
|
||||
|
||||
if (tree->ops && tree->ops->writepage_end_io_hook) {
|
||||
ret = tree->ops->writepage_end_io_hook(page, start,
|
||||
end, NULL, uptodate);
|
||||
if (ret)
|
||||
uptodate = 0;
|
||||
}
|
||||
if (tree->ops && tree->ops->writepage_end_io_hook)
|
||||
tree->ops->writepage_end_io_hook(page, start, end, NULL,
|
||||
uptodate);
|
||||
|
||||
if (!uptodate) {
|
||||
ClearPageUptodate(page);
|
||||
|
@ -2568,21 +2567,21 @@ static void end_bio_extent_readpage(struct bio *bio)
|
|||
len = bvec->bv_len;
|
||||
|
||||
mirror = io_bio->mirror_num;
|
||||
if (likely(uptodate && tree->ops &&
|
||||
tree->ops->readpage_end_io_hook)) {
|
||||
if (likely(uptodate && tree->ops)) {
|
||||
ret = tree->ops->readpage_end_io_hook(io_bio, offset,
|
||||
page, start, end,
|
||||
mirror);
|
||||
if (ret)
|
||||
uptodate = 0;
|
||||
else
|
||||
clean_io_failure(inode, start, page, 0);
|
||||
clean_io_failure(BTRFS_I(inode), start,
|
||||
page, 0);
|
||||
}
|
||||
|
||||
if (likely(uptodate))
|
||||
goto readpage_ok;
|
||||
|
||||
if (tree->ops && tree->ops->readpage_io_failed_hook) {
|
||||
if (tree->ops) {
|
||||
ret = tree->ops->readpage_io_failed_hook(page, mirror);
|
||||
if (!ret && !bio->bi_error)
|
||||
uptodate = 1;
|
||||
|
@ -2731,7 +2730,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
|
|||
bio->bi_private = NULL;
|
||||
bio_get(bio);
|
||||
|
||||
if (tree->ops && tree->ops->submit_bio_hook)
|
||||
if (tree->ops)
|
||||
ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
|
||||
mirror_num, bio_flags, start);
|
||||
else
|
||||
|
@ -2746,7 +2745,7 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page,
|
|||
unsigned long bio_flags)
|
||||
{
|
||||
int ret = 0;
|
||||
if (tree->ops && tree->ops->merge_bio_hook)
|
||||
if (tree->ops)
|
||||
ret = tree->ops->merge_bio_hook(page, offset, size, bio,
|
||||
bio_flags);
|
||||
return ret;
|
||||
|
@ -2857,7 +2856,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
|
|||
*em_cached = NULL;
|
||||
}
|
||||
|
||||
em = get_extent(inode, page, pg_offset, start, len, 0);
|
||||
em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
|
||||
if (em_cached && !IS_ERR_OR_NULL(em)) {
|
||||
BUG_ON(*em_cached);
|
||||
atomic_inc(&em->refs);
|
||||
|
@ -3101,7 +3100,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
|
|||
inode = pages[0]->mapping->host;
|
||||
while (1) {
|
||||
lock_extent(tree, start, end);
|
||||
ordered = btrfs_lookup_ordered_range(inode, start,
|
||||
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
|
||||
end - start + 1);
|
||||
if (!ordered)
|
||||
break;
|
||||
|
@ -3173,7 +3172,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
|
|||
|
||||
while (1) {
|
||||
lock_extent(tree, start, end);
|
||||
ordered = btrfs_lookup_ordered_range(inode, start,
|
||||
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
|
||||
PAGE_SIZE);
|
||||
if (!ordered)
|
||||
break;
|
||||
|
@ -3370,7 +3369,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
|
|||
page_end, NULL, 1);
|
||||
break;
|
||||
}
|
||||
em = epd->get_extent(inode, page, pg_offset, cur,
|
||||
em = epd->get_extent(BTRFS_I(inode), page, pg_offset, cur,
|
||||
end - cur + 1, 1);
|
||||
if (IS_ERR_OR_NULL(em)) {
|
||||
SetPageError(page);
|
||||
|
@ -4335,7 +4334,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
|
|||
if (len == 0)
|
||||
break;
|
||||
len = ALIGN(len, sectorsize);
|
||||
em = get_extent(inode, NULL, 0, offset, len, 0);
|
||||
em = get_extent(BTRFS_I(inode), NULL, 0, offset, len, 0);
|
||||
if (IS_ERR_OR_NULL(em))
|
||||
return em;
|
||||
|
||||
|
|
|
@ -84,6 +84,7 @@ extern void le_bitmap_clear(u8 *map, unsigned int start, int len);
|
|||
|
||||
struct extent_state;
|
||||
struct btrfs_root;
|
||||
struct btrfs_inode;
|
||||
struct btrfs_io_bio;
|
||||
struct io_failure_record;
|
||||
|
||||
|
@ -91,24 +92,34 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
|
|||
int mirror_num, unsigned long bio_flags,
|
||||
u64 bio_offset);
|
||||
struct extent_io_ops {
|
||||
int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
|
||||
u64 start, u64 end, int *page_started,
|
||||
unsigned long *nr_written);
|
||||
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
|
||||
/*
|
||||
* The following callbacks must be allways defined, the function
|
||||
* pointer will be called unconditionally.
|
||||
*/
|
||||
extent_submit_bio_hook_t *submit_bio_hook;
|
||||
int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
|
||||
struct page *page, u64 start, u64 end,
|
||||
int mirror);
|
||||
int (*merge_bio_hook)(struct page *page, unsigned long offset,
|
||||
size_t size, struct bio *bio,
|
||||
unsigned long bio_flags);
|
||||
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
|
||||
int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
|
||||
struct page *page, u64 start, u64 end,
|
||||
int mirror);
|
||||
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
|
||||
|
||||
/*
|
||||
* Optional hooks, called if the pointer is not NULL
|
||||
*/
|
||||
int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
|
||||
u64 start, u64 end, int *page_started,
|
||||
unsigned long *nr_written);
|
||||
|
||||
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
|
||||
void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
|
||||
struct extent_state *state, int uptodate);
|
||||
void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
|
||||
unsigned *bits);
|
||||
void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
|
||||
unsigned *bits);
|
||||
void (*clear_bit_hook)(struct btrfs_inode *inode,
|
||||
struct extent_state *state,
|
||||
unsigned *bits);
|
||||
void (*merge_extent_hook)(struct inode *inode,
|
||||
struct extent_state *new,
|
||||
struct extent_state *other);
|
||||
|
@ -209,7 +220,7 @@ static inline int extent_compress_type(unsigned long bio_flags)
|
|||
|
||||
struct extent_map_tree;
|
||||
|
||||
typedef struct extent_map *(get_extent_t)(struct inode *inode,
|
||||
typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
|
||||
struct page *page,
|
||||
size_t pg_offset,
|
||||
u64 start, u64 len,
|
||||
|
@@ -451,12 +462,13 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
 
 struct btrfs_fs_info;
+struct btrfs_inode;
 
-int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
-		      struct page *page, unsigned int pg_offset,
-		      int mirror_num);
-int clean_io_failure(struct inode *inode, u64 start, struct page *page,
-		     unsigned int pg_offset);
+int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
+		      u64 logical, struct page *page,
+		      unsigned int pg_offset, int mirror_num);
+int clean_io_failure(struct btrfs_inode *inode, u64 start,
+		     struct page *page, unsigned int pg_offset);
 void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *eb, int mirror_num);
@@ -480,7 +492,9 @@ struct io_failure_record {
 	int in_validation;
 };
 
-void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end);
+
+void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
+		u64 end);
 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
 				struct io_failure_record **failrec_ret);
 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
@@ -489,7 +503,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 				    struct io_failure_record *failrec,
 				    struct page *page, int pg_offset, int icsum,
 				    bio_end_io_t *endio_func, void *data);
-int free_io_failure(struct inode *inode, struct io_failure_record *rec);
+int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 noinline u64 find_lock_delalloc_range(struct inode *inode,
 				      struct extent_io_tree *tree,
@@ -214,7 +214,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 		 * read from the commit root and sidestep a nasty deadlock
 		 * between reading the free space cache and updating the csum tree.
 		 */
-		if (btrfs_is_free_space_inode(inode)) {
+		if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
 			path->search_commit_root = 1;
 			path->skip_locking = 1;
 		}
@@ -643,7 +643,33 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 
 		/* delete the entire item, it is inside our range */
 		if (key.offset >= bytenr && csum_end <= end_byte) {
-			ret = btrfs_del_item(trans, root, path);
+			int del_nr = 1;
+
+			/*
+			 * Check how many csum items preceding this one in this
+			 * leaf correspond to our range and then delete them all
+			 * at once.
+			 */
+			if (key.offset > bytenr && path->slots[0] > 0) {
+				int slot = path->slots[0] - 1;
+
+				while (slot >= 0) {
+					struct btrfs_key pk;
+
+					btrfs_item_key_to_cpu(leaf, &pk, slot);
+					if (pk.offset < bytenr ||
+					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
+					    pk.objectid !=
+					    BTRFS_EXTENT_CSUM_OBJECTID)
+						break;
+					path->slots[0] = slot;
+					del_nr++;
+					key.offset = pk.offset;
+					slot--;
+				}
+			}
+			ret = btrfs_del_items(trans, root, path,
+					      path->slots[0], del_nr);
 			if (ret)
 				goto out;
 			if (key.offset == bytenr)
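The new code above widens a single csum deletion into one batched call by first scanning backwards over the preceding leaf slots that still fall inside the range. A simplified sketch of that backward scan over a sorted key array (plain C, illustrative data layout only, not the btrfs leaf format):

#include <stdio.h>

/*
 * "Delete" every entry of sorted keys[] that falls in [lo, hi], starting
 * from a slot already known to be inside the range: scan backwards to count
 * how many preceding neighbours also qualify, then report them as one batch.
 */
static void del_range_batched(const unsigned long *keys, int slot,
			      unsigned long lo, unsigned long hi)
{
	int first = slot;
	int del_nr = 1;

	while (first > 0 && keys[first - 1] >= lo && keys[first - 1] <= hi) {
		first--;
		del_nr++;
	}
	printf("delete %d items starting at slot %d\n", del_nr, first);
}

int main(void)
{
	unsigned long keys[] = { 10, 20, 30, 40, 50 };

	del_range_batched(keys, 3, 15, 45);	/* -> delete 3 items at slot 1 */
	return 0;
}

The point of the batching is the same as in the hunk: one tree modification for several adjacent items instead of one per item.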
@@ -904,14 +930,14 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 	goto out;
 }
 
-void btrfs_extent_item_to_extent_map(struct inode *inode,
+void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
 				     const struct btrfs_path *path,
 				     struct btrfs_file_extent_item *fi,
 				     const bool new_inline,
 				     struct extent_map *em)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf = path->nodes[0];
 	const int slot = path->slots[0];
 	struct btrfs_key key;
@@ -976,8 +1002,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
 		}
 	} else {
 		btrfs_err(fs_info,
-			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
-			  type, btrfs_ino(BTRFS_I(inode)), extent_start,
+			  "unknown file extent item type %d, inode %llu, offset %llu, "
+			  "root %llu", type, btrfs_ino(inode), extent_start,
 			  root->root_key.objectid);
 	}
 }
fs/btrfs/file.c | 139
@@ -92,10 +92,10 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
  * If an existing record is found the defrag item you
  * pass in is freed
  */
-static int __btrfs_add_inode_defrag(struct inode *inode,
+static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
 				    struct inode_defrag *defrag)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	struct inode_defrag *entry;
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
@@ -123,7 +123,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
 			return -EEXIST;
 		}
 	}
-	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 	rb_link_node(&defrag->rb_node, parent, p);
 	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 	return 0;
@@ -145,10 +145,10 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
  * enabled
  */
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
-			   struct inode *inode)
+			   struct btrfs_inode *inode)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct inode_defrag *defrag;
 	u64 transid;
 	int ret;
@@ -156,24 +156,24 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	if (!__need_auto_defrag(fs_info))
 		return 0;
 
-	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
+	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 		return 0;
 
 	if (trans)
 		transid = trans->transid;
 	else
-		transid = BTRFS_I(inode)->root->last_trans;
+		transid = inode->root->last_trans;
 
 	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 	if (!defrag)
 		return -ENOMEM;
 
-	defrag->ino = btrfs_ino(BTRFS_I(inode));
+	defrag->ino = btrfs_ino(inode);
 	defrag->transid = transid;
 	defrag->root = root->root_key.objectid;
 
 	spin_lock(&fs_info->defrag_inodes_lock);
-	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
+	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
 		/*
 		 * If we set IN_DEFRAG flag and evict the inode from memory,
 		 * and then re-read this inode, this new inode doesn't have
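The defrag hunks keep the existing pattern: an in-memory flag is tested before allocating a queue record, and the tree insert reports -EEXIST when a record for the same inode is already queued. A compact sketch of that duplicate-avoidance pattern (illustrative arrays stand in for the runtime flag and the rbtree; not the kernel structures):

#include <stdio.h>
#include <stdbool.h>

#define MAX_INO 64

static bool in_defrag[MAX_INO];	/* stands in for the IN_DEFRAG runtime flag */
static bool queued[MAX_INO];	/* stands in for the tree of defrag records */

/* Returns 0 if newly queued, -1 if a record already existed. */
static int queue_defrag(unsigned ino)
{
	if (in_defrag[ino])
		return -1;	/* cheap flag check, no allocation needed */

	if (queued[ino])
		return -1;	/* the tree insert would report -EEXIST */

	queued[ino] = true;
	in_defrag[ino] = true;
	return 0;
}

int main(void)
{
	printf("%d\n", queue_defrag(7));	/* 0: queued */
	printf("%d\n", queue_defrag(7));	/* -1: already queued */
	return 0;
}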
@@ -194,10 +194,10 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
  * the same inode in the tree, we will merge them together (by
  * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
  */
-static void btrfs_requeue_inode_defrag(struct inode *inode,
+static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 				       struct inode_defrag *defrag)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	int ret;
 
 	if (!__need_auto_defrag(fs_info))
@@ -334,7 +334,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 	 */
 	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 		defrag->last_offset = range.start;
-		btrfs_requeue_inode_defrag(inode, defrag);
+		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 	} else if (defrag->last_offset && !defrag->cycled) {
 		/*
 		 * we didn't fill our defrag batch, but
@@ -343,7 +343,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 		 */
 		defrag->last_offset = 0;
 		defrag->cycled = 1;
-		btrfs_requeue_inode_defrag(inode, defrag);
+		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 	} else {
 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 	}
@@ -529,13 +529,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
  * this drops all the extents in the cache that intersect the range
  * [start, end]. Existing extents are split as required.
  */
-void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 			     int skip_pinned)
 {
 	struct extent_map *em;
 	struct extent_map *split = NULL;
 	struct extent_map *split2 = NULL;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map_tree *em_tree = &inode->extent_tree;
 	u64 len = end - start + 1;
 	u64 gen;
 	int ret;
@@ -720,7 +720,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	int leafs_visited = 0;
 
 	if (drop_cache)
-		btrfs_drop_extent_cache(inode, start, end - 1, 0);
+		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 
 	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
 		modify_tree = 0;
@@ -1082,10 +1082,10 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
  * two or three.
  */
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
-			      struct inode *inode, u64 start, u64 end)
+			      struct btrfs_inode *inode, u64 start, u64 end)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf;
 	struct btrfs_path *path;
 	struct btrfs_file_extent_item *fi;
@@ -1102,7 +1102,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 	int del_slot = 0;
 	int recow;
 	int ret;
-	u64 ino = btrfs_ino(BTRFS_I(inode));
+	u64 ino = btrfs_ino(inode);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -1415,13 +1415,13 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
  * the other < 0 number - Something wrong happens
  */
 static noinline int
-lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
+lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
 				size_t write_bytes,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	u64 start_pos;
 	u64 last_pos;
 	int i;
@@ -1432,30 +1432,30 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 		+ round_up(pos + write_bytes - start_pos,
 			   fs_info->sectorsize) - 1;
 
-	if (start_pos < inode->i_size) {
+	if (start_pos < inode->vfs_inode.i_size) {
 		struct btrfs_ordered_extent *ordered;
-		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos, cached_state);
+		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
+				 cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
 						     last_pos - start_pos + 1);
 		if (ordered &&
 		    ordered->file_offset + ordered->len > start_pos &&
 		    ordered->file_offset <= last_pos) {
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     start_pos, last_pos,
-					     cached_state, GFP_NOFS);
+			unlock_extent_cached(&inode->io_tree, start_pos,
+					     last_pos, cached_state, GFP_NOFS);
 			for (i = 0; i < num_pages; i++) {
 				unlock_page(pages[i]);
 				put_page(pages[i]);
 			}
-			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_start_ordered_extent(&inode->vfs_inode,
+						   ordered, 1);
 			btrfs_put_ordered_extent(ordered);
 			return -EAGAIN;
 		}
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
 
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
+		clear_extent_bit(&inode->io_tree, start_pos,
 				 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 				 0, 0, cached_state, GFP_NOFS);
@@ -1474,11 +1474,11 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 	return ret;
 }
 
-static noinline int check_can_nocow(struct inode *inode, loff_t pos,
+static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 				    size_t *write_bytes)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct btrfs_ordered_extent *ordered;
 	u64 lockstart, lockend;
 	u64 num_bytes;
@@ -1493,19 +1493,20 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 			 fs_info->sectorsize) - 1;
 
 	while (1) {
-		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		lock_extent(&inode->io_tree, lockstart, lockend);
 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
 						     lockend - lockstart + 1);
 		if (!ordered) {
 			break;
 		}
-		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
-		btrfs_start_ordered_extent(inode, ordered, 1);
+		unlock_extent(&inode->io_tree, lockstart, lockend);
+		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}
 
 	num_bytes = lockend - lockstart + 1;
-	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
+	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
+			       NULL, NULL, NULL);
 	if (ret <= 0) {
 		ret = 0;
 		btrfs_end_write_no_snapshoting(root);
@@ -1514,7 +1515,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 					   num_bytes - pos + lockstart);
 	}
 
-	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+	unlock_extent(&inode->io_tree, lockstart, lockend);
 
 	return ret;
 }
@@ -1579,7 +1580,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		if (ret < 0) {
 			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 						      BTRFS_INODE_PREALLOC)) &&
-			    check_can_nocow(inode, pos, &write_bytes) > 0) {
+			    check_can_nocow(BTRFS_I(inode), pos,
+					    &write_bytes) > 0) {
 				/*
 				 * For nodata cow case, no need to reserve
 				 * data space.
@@ -1599,7 +1601,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			}
 		}
 
-		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
+		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
+				reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
 				btrfs_free_reserved_data_space(inode, pos,
@@ -1623,9 +1626,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		if (ret)
 			break;
 
-		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-				pos, write_bytes, &lockstart,
-				&lockend, &cached_state);
+		ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages,
+				num_pages, pos, write_bytes, &lockstart,
+				&lockend, &cached_state);
 		if (ret < 0) {
 			if (ret == -EAGAIN)
 				goto again;
@@ -1677,7 +1680,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			spin_unlock(&BTRFS_I(inode)->lock);
 		}
 		if (only_release_metadata) {
-			btrfs_delalloc_release_metadata(inode,
+			btrfs_delalloc_release_metadata(BTRFS_I(inode),
 							release_bytes);
 		} else {
 			u64 __pos;
@@ -1738,7 +1741,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	if (release_bytes) {
 		if (only_release_metadata) {
 			btrfs_end_write_no_snapshoting(root);
-			btrfs_delalloc_release_metadata(inode, release_bytes);
+			btrfs_delalloc_release_metadata(BTRFS_I(inode),
+					release_bytes);
 		} else {
 			btrfs_delalloc_release_space(inode,
 					round_down(pos, fs_info->sectorsize),
@@ -2193,7 +2197,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
-static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
+static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
 			  int slot, u64 start, u64 end)
 {
 	struct btrfs_file_extent_item *fi;
@@ -2203,7 +2207,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
 		return 0;
 
 	btrfs_item_key_to_cpu(leaf, &key, slot);
-	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
+	if (key.objectid != btrfs_ino(inode) ||
 	    key.type != BTRFS_EXTENT_DATA_KEY)
 		return 0;
 
@@ -2222,22 +2226,23 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
 	return 0;
 }
 
-static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
-		      struct btrfs_path *path, u64 offset, u64 end)
+static int fill_holes(struct btrfs_trans_handle *trans,
+		struct btrfs_inode *inode,
+		struct btrfs_path *path, u64 offset, u64 end)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
 	struct extent_map *hole_em;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map_tree *em_tree = &inode->extent_tree;
 	struct btrfs_key key;
 	int ret;
 
 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
 		goto out;
 
-	key.objectid = btrfs_ino(BTRFS_I(inode));
+	key.objectid = btrfs_ino(inode);
 	key.type = BTRFS_EXTENT_DATA_KEY;
 	key.offset = offset;
 
@@ -2253,7 +2258,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	}
 
 	leaf = path->nodes[0];
-	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
+	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
 		u64 num_bytes;
 
 		path->slots[0]--;
@@ -2285,7 +2290,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	}
 	btrfs_release_path(path);
 
-	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
+	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
 			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
 	if (ret)
 		return ret;
@@ -2296,8 +2301,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	hole_em = alloc_extent_map();
 	if (!hole_em) {
 		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
-		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-			&BTRFS_I(inode)->runtime_flags);
+		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
 	} else {
 		hole_em->start = offset;
 		hole_em->len = end - offset;
@@ -2320,7 +2324,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 		free_extent_map(hole_em);
 		if (ret)
 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-				&BTRFS_I(inode)->runtime_flags);
+				&inode->runtime_flags);
 	}
 
 	return 0;
@@ -2337,7 +2341,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
 	struct extent_map *em;
 	int ret = 0;
 
-	em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
+	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0);
 	if (IS_ERR_OR_NULL(em)) {
 		if (!em)
 			ret = -ENOMEM;
@@ -2550,8 +2554,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		trans->block_rsv = &fs_info->trans_block_rsv;
 
 		if (cur_offset < drop_end && cur_offset < ino_size) {
-			ret = fill_holes(trans, inode, path, cur_offset,
-					 drop_end);
+			ret = fill_holes(trans, BTRFS_I(inode), path,
+					cur_offset, drop_end);
 			if (ret) {
 				/*
 				 * If we failed then we didn't insert our hole
@@ -2622,7 +2626,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	 * cur_offset == drop_end).
 	 */
 	if (cur_offset < ino_size && cur_offset < drop_end) {
-		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+		ret = fill_holes(trans, BTRFS_I(inode), path,
+				cur_offset, drop_end);
 		if (ret) {
 			/* Same comment as above. */
 			btrfs_abort_transaction(trans, ret);
@@ -2747,7 +2752,8 @@ static long btrfs_fallocate(struct file *file, int mode,
 	 *
 	 * For qgroup space, it will be checked later.
 	 */
-	ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
+	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+			alloc_end - alloc_start);
 	if (ret < 0)
 		return ret;
 
@@ -2827,7 +2833,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	/* First, check if we exceed the qgroup limit */
 	INIT_LIST_HEAD(&reserve_list);
 	while (1) {
-		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
 				      alloc_end - cur_offset, 0);
 		if (IS_ERR_OR_NULL(em)) {
 			if (!em)
@@ -2954,7 +2960,8 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 			 &cached_state);
 
 	while (start < inode->i_size) {
-		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
+		em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
+				start, len, 0);
 		if (IS_ERR(em)) {
 			ret = PTR_ERR(em);
 			em = NULL;
@@ -260,7 +260,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
 		btrfs_free_path(path);
 	}
 
-	btrfs_i_size_write(inode, 0);
+	btrfs_i_size_write(BTRFS_I(inode), 0);
 	truncate_pagecache(inode, 0);
 
 	/*
@@ -3545,7 +3545,8 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 
 	if (ret) {
 		if (release_metadata)
-			btrfs_delalloc_release_metadata(inode, inode->i_size);
+			btrfs_delalloc_release_metadata(BTRFS_I(inode),
+					inode->i_size);
 #ifdef DEBUG
 		btrfs_err(fs_info,
 			  "failed to write free ino cache for root %llu",
@@ -499,7 +499,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
 					      prealloc, prealloc, &alloc_hint);
 	if (ret) {
-		btrfs_delalloc_release_metadata(inode, prealloc);
+		btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc);
 		goto out_put;
 	}
 
fs/btrfs/inode.c | 445 (file diff suppressed because it is too large)
|
@ -434,7 +434,7 @@ int btrfs_is_empty_uuid(u8 *uuid)
|
|||
|
||||
static noinline int create_subvol(struct inode *dir,
|
||||
struct dentry *dentry,
|
||||
char *name, int namelen,
|
||||
const char *name, int namelen,
|
||||
u64 *async_transid,
|
||||
struct btrfs_qgroup_inherit *inherit)
|
||||
{
|
||||
|
@ -580,21 +580,21 @@ static noinline int create_subvol(struct inode *dir,
|
|||
/*
|
||||
* insert the directory item
|
||||
*/
|
||||
ret = btrfs_set_inode_index(dir, &index);
|
||||
ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
|
||||
if (ret) {
|
||||
btrfs_abort_transaction(trans, ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = btrfs_insert_dir_item(trans, root,
|
||||
name, namelen, dir, &key,
|
||||
name, namelen, BTRFS_I(dir), &key,
|
||||
BTRFS_FT_DIR, index);
|
||||
if (ret) {
|
||||
btrfs_abort_transaction(trans, ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
btrfs_i_size_write(dir, dir->i_size + namelen * 2);
|
||||
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
|
||||
ret = btrfs_update_inode(trans, root, dir);
|
||||
BUG_ON(ret);
|
||||
|
||||
|
@ -832,7 +832,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
|
|||
* inside this filesystem so it's quite a bit simpler.
|
||||
*/
|
||||
static noinline int btrfs_mksubvol(const struct path *parent,
|
||||
char *name, int namelen,
|
||||
const char *name, int namelen,
|
||||
struct btrfs_root *snap_src,
|
||||
u64 *async_transid, bool readonly,
|
||||
struct btrfs_qgroup_inherit *inherit)
|
||||
|
@ -1009,7 +1009,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
|
|||
|
||||
/* get the big lock and read metadata off disk */
|
||||
lock_extent_bits(io_tree, start, end, &cached);
|
||||
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
|
||||
unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
|
||||
|
||||
if (IS_ERR(em))
|
||||
|
@ -1625,7 +1625,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
|
|||
}
|
||||
|
||||
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
|
||||
char *name, unsigned long fd, int subvol,
|
||||
const char *name, unsigned long fd, int subvol,
|
||||
u64 *transid, bool readonly,
|
||||
struct btrfs_qgroup_inherit *inherit)
|
||||
{
|
||||
|
@ -3298,7 +3298,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
|
|||
if (endoff > destoff + olen)
|
||||
endoff = destoff + olen;
|
||||
if (endoff > inode->i_size)
|
||||
btrfs_i_size_write(inode, endoff);
|
||||
btrfs_i_size_write(BTRFS_I(inode), endoff);
|
||||
|
||||
ret = btrfs_update_inode(trans, root, inode);
|
||||
if (ret) {
|
||||
|
@ -3311,20 +3311,19 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void clone_update_extent_map(struct inode *inode,
|
||||
static void clone_update_extent_map(struct btrfs_inode *inode,
|
||||
const struct btrfs_trans_handle *trans,
|
||||
const struct btrfs_path *path,
|
||||
const u64 hole_offset,
|
||||
const u64 hole_len)
|
||||
{
|
||||
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
|
||||
struct extent_map_tree *em_tree = &inode->extent_tree;
|
||||
struct extent_map *em;
|
||||
int ret;
|
||||
|
||||
em = alloc_extent_map();
|
||||
if (!em) {
|
||||
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -3338,7 +3337,7 @@ static void clone_update_extent_map(struct inode *inode,
|
|||
if (btrfs_file_extent_type(path->nodes[0], fi) ==
|
||||
BTRFS_FILE_EXTENT_INLINE)
|
||||
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
&inode->runtime_flags);
|
||||
} else {
|
||||
em->start = hole_offset;
|
||||
em->len = hole_len;
|
||||
|
@ -3364,8 +3363,7 @@ static void clone_update_extent_map(struct inode *inode,
|
|||
}
|
||||
|
||||
if (ret)
|
||||
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3791,11 +3789,12 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
|
|||
|
||||
/* If we have an implicit hole (NO_HOLES feature). */
|
||||
if (drop_start < new_key.offset)
|
||||
clone_update_extent_map(inode, trans,
|
||||
clone_update_extent_map(BTRFS_I(inode), trans,
|
||||
NULL, drop_start,
|
||||
new_key.offset - drop_start);
|
||||
|
||||
clone_update_extent_map(inode, trans, path, 0, 0);
|
||||
clone_update_extent_map(BTRFS_I(inode), trans,
|
||||
path, 0, 0);
|
||||
|
||||
btrfs_mark_buffer_dirty(leaf);
|
||||
btrfs_release_path(path);
|
||||
|
@ -3845,8 +3844,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
|
|||
btrfs_end_transaction(trans);
|
||||
goto out;
|
||||
}
|
||||
clone_update_extent_map(inode, trans, NULL, last_dest_end,
|
||||
destoff + len - last_dest_end);
|
||||
clone_update_extent_map(BTRFS_I(inode), trans, NULL,
|
||||
last_dest_end,
|
||||
destoff + len - last_dest_end);
|
||||
ret = clone_finish_inode_update(trans, inode, destoff + len,
|
||||
destoff, olen, no_time_update);
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ static inline void write_compress_length(char *buf, size_t len)
|
|||
memcpy(buf, &dlen, LZO_LEN);
|
||||
}
|
||||
|
||||
static inline size_t read_compress_length(char *buf)
|
||||
static inline size_t read_compress_length(const char *buf)
|
||||
{
|
||||
__le32 dlen;
|
||||
|
||||
|
@ -86,13 +86,11 @@ static inline size_t read_compress_length(char *buf)
|
|||
|
||||
static int lzo_compress_pages(struct list_head *ws,
|
||||
struct address_space *mapping,
|
||||
u64 start, unsigned long len,
|
||||
u64 start,
|
||||
struct page **pages,
|
||||
unsigned long nr_dest_pages,
|
||||
unsigned long *out_pages,
|
||||
unsigned long *total_in,
|
||||
unsigned long *total_out,
|
||||
unsigned long max_out)
|
||||
unsigned long *total_out)
|
||||
{
|
||||
struct workspace *workspace = list_entry(ws, struct workspace, list);
|
||||
int ret = 0;
|
||||
|
@ -102,7 +100,9 @@ static int lzo_compress_pages(struct list_head *ws,
|
|||
struct page *in_page = NULL;
|
||||
struct page *out_page = NULL;
|
||||
unsigned long bytes_left;
|
||||
|
||||
unsigned long len = *total_out;
|
||||
unsigned long nr_dest_pages = *out_pages;
|
||||
const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
|
||||
size_t in_len;
|
||||
size_t out_len;
|
||||
char *buf;
|
||||
|
|
|
@ -879,15 +879,14 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
|
|||
/* Since the DIO code tries to lock a wide area we need to look for any ordered
|
||||
* extents that exist in the range, rather than just the start of the range.
|
||||
*/
|
||||
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
|
||||
u64 file_offset,
|
||||
u64 len)
|
||||
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
|
||||
struct btrfs_inode *inode, u64 file_offset, u64 len)
|
||||
{
|
||||
struct btrfs_ordered_inode_tree *tree;
|
||||
struct rb_node *node;
|
||||
struct btrfs_ordered_extent *entry = NULL;
|
||||
|
||||
tree = &BTRFS_I(inode)->ordered_tree;
|
||||
tree = &inode->ordered_tree;
|
||||
spin_lock_irq(&tree->lock);
|
||||
node = tree_search(tree, file_offset);
|
||||
if (!node) {
|
||||
|
@ -923,7 +922,7 @@ bool btrfs_have_ordered_extents_in_range(struct inode *inode,
|
|||
{
|
||||
struct btrfs_ordered_extent *oe;
|
||||
|
||||
oe = btrfs_lookup_ordered_range(inode, file_offset, len);
|
||||
oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
|
||||
if (oe) {
|
||||
btrfs_put_ordered_extent(oe);
|
||||
return true;
|
||||
|
|
|
@ -189,9 +189,10 @@ void btrfs_start_ordered_extent(struct inode *inode,
|
|||
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
|
||||
struct btrfs_ordered_extent *
|
||||
btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
|
||||
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
|
||||
u64 file_offset,
|
||||
u64 len);
|
||||
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
|
||||
struct btrfs_inode *inode,
|
||||
u64 file_offset,
|
||||
u64 len);
|
||||
bool btrfs_have_ordered_extents_in_range(struct inode *inode,
|
||||
u64 file_offset,
|
||||
u64 len);
|
||||
|
|
|
@ -1714,8 +1714,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
|
|||
if (!ret)
|
||||
continue;
|
||||
|
||||
btrfs_drop_extent_cache(inode, key.offset, end,
|
||||
1);
|
||||
btrfs_drop_extent_cache(BTRFS_I(inode),
|
||||
key.offset, end, 1);
|
||||
unlock_extent(&BTRFS_I(inode)->io_tree,
|
||||
key.offset, end);
|
||||
}
|
||||
|
@ -2130,7 +2130,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
|
|||
|
||||
/* the lock_extent waits for readpage to complete */
|
||||
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
|
||||
btrfs_drop_extent_cache(inode, start, end, 1);
|
||||
btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
|
||||
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
|
||||
}
|
||||
return 0;
|
||||
|
@ -3161,7 +3161,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
|
|||
free_extent_map(em);
|
||||
break;
|
||||
}
|
||||
btrfs_drop_extent_cache(inode, start, end, 0);
|
||||
btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
|
||||
}
|
||||
unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
|
||||
return ret;
|
||||
|
@ -3203,7 +3203,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
|
|||
index = (cluster->start - offset) >> PAGE_SHIFT;
|
||||
last_index = (cluster->end - offset) >> PAGE_SHIFT;
|
||||
while (index <= last_index) {
|
||||
ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
|
||||
ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
|
||||
PAGE_SIZE);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
@ -3215,7 +3216,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
|
|||
page = find_or_create_page(inode->i_mapping, index,
|
||||
mask);
|
||||
if (!page) {
|
||||
btrfs_delalloc_release_metadata(inode,
|
||||
btrfs_delalloc_release_metadata(BTRFS_I(inode),
|
||||
PAGE_SIZE);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
|
@ -3234,7 +3235,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
|
|||
if (!PageUptodate(page)) {
|
||||
unlock_page(page);
|
||||
put_page(page);
|
||||
btrfs_delalloc_release_metadata(inode,
|
||||
btrfs_delalloc_release_metadata(BTRFS_I(inode),
|
||||
PAGE_SIZE);
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
|
@ -4245,7 +4246,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
|
|||
BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
|
||||
BTRFS_I(inode)->index_cnt = group->key.objectid;
|
||||
|
||||
err = btrfs_orphan_add(trans, inode);
|
||||
err = btrfs_orphan_add(trans, BTRFS_I(inode));
|
||||
out:
|
||||
btrfs_end_transaction(trans);
|
||||
btrfs_btree_balance_dirty(fs_info);
|
||||
|
|
|
@ -731,7 +731,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
|
|||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
ret = repair_io_failure(inode, offset, PAGE_SIZE,
|
||||
ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE,
|
||||
fixup->logical, page,
|
||||
offset - page_offset(page),
|
||||
fixup->mirror_num);
|
||||
|
@ -4236,7 +4236,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
|
|||
scrub_pending_trans_workers_dec(sctx);
|
||||
}
|
||||
|
||||
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
|
||||
static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
|
||||
u64 logical)
|
||||
{
|
||||
struct extent_state *cached_state = NULL;
|
||||
|
@ -4246,7 +4246,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
|
|||
u64 lockstart = start, lockend = start + len - 1;
|
||||
int ret = 0;
|
||||
|
||||
io_tree = &BTRFS_I(inode)->io_tree;
|
||||
io_tree = &inode->io_tree;
|
||||
|
||||
lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
|
||||
ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
|
||||
|
@ -4325,7 +4325,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
|
|||
io_tree = &BTRFS_I(inode)->io_tree;
|
||||
nocow_ctx_logical = nocow_ctx->logical;
|
||||
|
||||
ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
|
||||
ret = check_extent_to_block(BTRFS_I(inode), offset, len,
|
||||
nocow_ctx_logical);
|
||||
if (ret) {
|
||||
ret = ret > 0 ? 0 : ret;
|
||||
goto out;
|
||||
|
@ -4372,7 +4373,7 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
|
|||
}
|
||||
}
|
||||
|
||||
ret = check_extent_to_block(inode, offset, len,
|
||||
ret = check_extent_to_block(BTRFS_I(inode), offset, len,
|
||||
nocow_ctx_logical);
|
||||
if (ret) {
|
||||
ret = ret > 0 ? 0 : ret;
|
||||
|
|
fs/btrfs/send.c | 125
|
@ -1681,6 +1681,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (ino == BTRFS_FIRST_FREE_OBJECTID)
|
||||
return 1;
|
||||
|
||||
ret = get_cur_inode_state(sctx, ino, gen);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
@ -1866,7 +1869,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
|
|||
* not deleted and then re-created, if it was then we have no overwrite
|
||||
* and we can just unlink this entry.
|
||||
*/
|
||||
if (sctx->parent_root) {
|
||||
if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
|
||||
ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
|
||||
NULL, NULL, NULL);
|
||||
if (ret < 0 && ret != -ENOENT)
|
||||
|
@ -1934,6 +1937,19 @@ static int did_overwrite_ref(struct send_ctx *sctx,
|
|||
if (ret <= 0)
|
||||
goto out;
|
||||
|
||||
if (dir != BTRFS_FIRST_FREE_OBJECTID) {
|
||||
ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
|
||||
NULL, NULL, NULL);
|
||||
if (ret < 0 && ret != -ENOENT)
|
||||
goto out;
|
||||
if (ret) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
if (gen != dir_gen)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* check if the ref was overwritten by another ref */
|
||||
ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
|
||||
&ow_inode, &other_type);
|
||||
|
@ -3556,6 +3572,7 @@ static int wait_for_parent_move(struct send_ctx *sctx,
|
|||
{
|
||||
int ret = 0;
|
||||
u64 ino = parent_ref->dir;
|
||||
u64 ino_gen = parent_ref->dir_gen;
|
||||
u64 parent_ino_before, parent_ino_after;
|
||||
struct fs_path *path_before = NULL;
|
||||
struct fs_path *path_after = NULL;
|
||||
|
@ -3576,6 +3593,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
|
|||
* at get_cur_path()).
|
||||
*/
|
||||
while (ino > BTRFS_FIRST_FREE_OBJECTID) {
|
||||
u64 parent_ino_after_gen;
|
||||
|
||||
if (is_waiting_for_move(sctx, ino)) {
|
||||
/*
|
||||
* If the current inode is an ancestor of ino in the
|
||||
|
@ -3598,7 +3617,7 @@ static int wait_for_parent_move(struct send_ctx *sctx,
|
|||
fs_path_reset(path_after);
|
||||
|
||||
ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
|
||||
NULL, path_after);
|
||||
&parent_ino_after_gen, path_after);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
|
||||
|
@ -3615,10 +3634,20 @@ static int wait_for_parent_move(struct send_ctx *sctx,
|
|||
if (ino > sctx->cur_ino &&
|
||||
(parent_ino_before != parent_ino_after || len1 != len2 ||
|
||||
memcmp(path_before->start, path_after->start, len1))) {
|
||||
ret = 1;
|
||||
break;
|
||||
u64 parent_ino_gen;
|
||||
|
||||
ret = get_inode_info(sctx->parent_root, ino, NULL,
|
||||
&parent_ino_gen, NULL, NULL, NULL,
|
||||
NULL);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
if (ino_gen == parent_ino_gen) {
|
||||
ret = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ino = parent_ino_after;
|
||||
ino_gen = parent_ino_after_gen;
|
||||
}
|
||||
|
||||
out:
|
||||
|
@@ -5277,6 +5306,81 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
 	return ret;
 }
 
+static int range_is_hole_in_parent(struct send_ctx *sctx,
+				   const u64 start,
+				   const u64 end)
+{
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_root *root = sctx->parent_root;
+	u64 search_start = start;
+	int ret;
+
+	path = alloc_path_for_send();
+	if (!path)
+		return -ENOMEM;
+
+	key.objectid = sctx->cur_ino;
+	key.type = BTRFS_EXTENT_DATA_KEY;
+	key.offset = search_start;
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (ret > 0 && path->slots[0] > 0)
+		path->slots[0]--;
+
+	while (search_start < end) {
+		struct extent_buffer *leaf = path->nodes[0];
+		int slot = path->slots[0];
+		struct btrfs_file_extent_item *fi;
+		u64 extent_end;
+
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+			continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (key.objectid < sctx->cur_ino ||
+		    key.type < BTRFS_EXTENT_DATA_KEY)
+			goto next;
+		if (key.objectid > sctx->cur_ino ||
+		    key.type > BTRFS_EXTENT_DATA_KEY ||
+		    key.offset >= end)
+			break;
+
+		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+		if (btrfs_file_extent_type(leaf, fi) ==
+		    BTRFS_FILE_EXTENT_INLINE) {
+			u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
+
+			extent_end = ALIGN(key.offset + size,
+					   root->fs_info->sectorsize);
+		} else {
+			extent_end = key.offset +
+				btrfs_file_extent_num_bytes(leaf, fi);
+		}
+		if (extent_end <= start)
+			goto next;
+		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
+			search_start = extent_end;
+			goto next;
+		}
+		ret = 0;
+		goto out;
+next:
+		path->slots[0]++;
+	}
+	ret = 1;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
 			   struct btrfs_key *key)
 {
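range_is_hole_in_parent() above walks the parent snapshot's sorted file extent items and reports 1 only when nothing written overlaps the queried range, so maybe_send_hole() can skip emitting a hole the receiver already has. A reduced model of that overlap scan over a sorted extent list (plain C, illustrative data layout, not the on-disk item format):

#include <stdio.h>

struct extent {
	unsigned long start;
	unsigned long len;
	int is_hole;	/* models disk_bytenr == 0 in the real item */
};

/* Returns 1 if [start, end) overlaps no written extent, 0 otherwise. */
static int range_is_hole(const struct extent *ext, int nr,
			 unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < nr; i++) {
		unsigned long ext_end = ext[i].start + ext[i].len;

		if (ext_end <= start || ext[i].start >= end)
			continue;	/* no overlap with the queried range */
		if (!ext[i].is_hole)
			return 0;	/* written data found in the range */
	}
	return 1;
}

int main(void)
{
	struct extent ext[] = {
		{ 0,    4096, 0 },	/* written */
		{ 4096, 8192, 1 },	/* hole */
	};

	printf("%d\n", range_is_hole(ext, 2, 4096, 12288));	/* 1 */
	printf("%d\n", range_is_hole(ext, 2, 0, 8192));		/* 0 */
	return 0;
}

The hunk that follows wires this check into maybe_send_hole() so a hole write is only sent when the parent actually had data there.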
@@ -5321,8 +5425,17 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
 		return ret;
 	}
 
-	if (sctx->cur_inode_last_extent < key->offset)
-		ret = send_hole(sctx, key->offset);
+	if (sctx->cur_inode_last_extent < key->offset) {
+		ret = range_is_hole_in_parent(sctx,
+					      sctx->cur_inode_last_extent,
+					      key->offset);
+		if (ret < 0)
+			return ret;
+		else if (ret == 0)
+			ret = send_hole(sctx, key->offset);
+		else
+			ret = 0;
+	}
 	sctx->cur_inode_last_extent = extent_end;
 	return ret;
 }
@ -278,7 +278,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
|
||||
/* First with no extents */
|
||||
BTRFS_I(inode)->root = root;
|
||||
em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
em = NULL;
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
|
@ -293,7 +293,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
goto out;
|
||||
}
|
||||
free_extent_map(em);
|
||||
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
|
||||
btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
|
||||
|
||||
/*
|
||||
* All of the magic numbers are based on the mapping setup in
|
||||
|
@ -302,7 +302,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
*/
|
||||
setup_file_extents(root, sectorsize);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -323,7 +323,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -350,7 +350,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -372,7 +372,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* Regular extent */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -399,7 +399,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* The next 3 are split extents */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -428,7 +428,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -450,7 +450,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -484,7 +484,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* Prealloc extent */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -513,7 +513,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* The next 3 are a half written prealloc extent */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -543,7 +543,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -576,7 +576,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -611,7 +611,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* Now for the compressed extent */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -645,7 +645,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* Split compressed extent */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -680,7 +680,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -707,7 +707,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -742,7 +742,8 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
free_extent_map(em);
|
||||
|
||||
/* A hole between regular extents but no hole extent */
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6,
|
||||
sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -769,7 +770,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, 4096 * 1024, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, 4096 * 1024, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -802,7 +803,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
|
|||
offset = em->start + em->len;
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -885,7 +886,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
|
|||
insert_inode_item_key(root);
|
||||
insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
|
||||
sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
|
||||
em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
@ -907,7 +908,8 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
|
|||
}
|
||||
free_extent_map(em);
|
||||
|
||||
em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
|
||||
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize,
|
||||
2 * sectorsize, 0);
|
||||
if (IS_ERR(em)) {
|
||||
test_msg("Got an error when we shouldn't have\n");
|
||||
goto out;
|
||||
|
|
|
@ -1505,7 +1505,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
|
|||
/*
|
||||
* insert the directory item
|
||||
*/
|
||||
ret = btrfs_set_inode_index(parent_inode, &index);
|
||||
ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
|
||||
BUG_ON(ret); /* -ENOMEM */
|
||||
|
||||
/* check if there is a file/dir which has the same name. */
|
||||
|
@ -1644,7 +1644,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
|
|||
|
||||
ret = btrfs_insert_dir_item(trans, parent_root,
|
||||
dentry->d_name.name, dentry->d_name.len,
|
||||
parent_inode, &key,
|
||||
BTRFS_I(parent_inode), &key,
|
||||
BTRFS_FT_DIR, index);
|
||||
/* We have check then name at the beginning, so it is impossible. */
|
||||
BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
|
||||
|
@ -1653,7 +1653,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
btrfs_i_size_write(parent_inode, parent_inode->i_size +
|
||||
btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
|
||||
dentry->d_name.len * 2);
|
||||
parent_inode->i_mtime = parent_inode->i_ctime =
|
||||
current_time(parent_inode);
|
||||
|
|
|
@ -673,6 +673,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
|
|||
unsigned long dest_offset;
|
||||
struct btrfs_key ins;
|
||||
|
||||
if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
|
||||
btrfs_fs_incompat(fs_info, NO_HOLES))
|
||||
goto update_inode;
|
||||
|
||||
ret = btrfs_insert_empty_item(trans, root, path, key,
|
||||
sizeof(*item));
|
||||
if (ret)
|
||||
|
@ -825,6 +829,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
inode_add_bytes(inode, nbytes);
|
||||
update_inode:
|
||||
ret = btrfs_update_inode(trans, root, inode);
|
||||
out:
|
||||
if (inode)
|
||||
|
@ -1322,8 +1327,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
|
|||
}
|
||||
|
||||
/* insert our name */
|
||||
ret = btrfs_add_link(trans, dir, inode, name, namelen,
|
||||
0, ref_index);
|
||||
ret = btrfs_add_link(trans, BTRFS_I(dir),
|
||||
BTRFS_I(inode),
|
||||
name, namelen, 0, ref_index);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
@ -1641,7 +1647,8 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
|
||||
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
|
||||
name_len, 1, index);
|
||||
|
||||
/* FIXME, put inode into FIXUP list */
|
||||
|
||||
|
@ -1780,7 +1787,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
|
|||
out:
|
||||
btrfs_release_path(path);
|
||||
if (!ret && update_size) {
|
||||
btrfs_i_size_write(dir, dir->i_size + name_len * 2);
|
||||
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
|
||||
ret = btrfs_update_inode(trans, root, dir);
|
||||
}
|
||||
kfree(name);
|
||||
|
@ -5045,14 +5052,14 @@ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
|
|||
* a full commit is required.
|
||||
*/
|
||||
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode,
|
||||
struct btrfs_inode *inode,
|
||||
struct dentry *parent,
|
||||
struct super_block *sb,
|
||||
u64 last_committed)
|
||||
{
|
||||
int ret = 0;
|
||||
struct dentry *old_parent = NULL;
|
||||
struct inode *orig_inode = inode;
|
||||
struct btrfs_inode *orig_inode = inode;
|
||||
|
||||
/*
|
||||
* for regular files, if its inode is already on disk, we don't
|
||||
|
@@ -5060,15 +5067,15 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 	 * we can use the last_unlink_trans field to record renames
 	 * and other fun in this file.
 	 */
-	if (S_ISREG(inode->i_mode) &&
-	    BTRFS_I(inode)->generation <= last_committed &&
-	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
-		goto out;
+	if (S_ISREG(inode->vfs_inode.i_mode) &&
+	    inode->generation <= last_committed &&
+	    inode->last_unlink_trans <= last_committed)
+		goto out;
 
-	if (!S_ISDIR(inode->i_mode)) {
+	if (!S_ISDIR(inode->vfs_inode.i_mode)) {
 		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			goto out;
-		inode = d_inode(parent);
+		inode = BTRFS_I(d_inode(parent));
 	}
 
 	while (1) {
@@ -5079,10 +5086,10 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 		 * think this inode has already been logged.
 		 */
 		if (inode != orig_inode)
-			BTRFS_I(inode)->logged_trans = trans->transid;
+			inode->logged_trans = trans->transid;
 		smp_mb();
 
-		if (btrfs_must_commit_transaction(trans, BTRFS_I(inode))) {
+		if (btrfs_must_commit_transaction(trans, inode)) {
 			ret = 1;
 			break;
 		}
@@ -5091,8 +5098,8 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 			break;
 
 		if (IS_ROOT(parent)) {
-			inode = d_inode(parent);
-			if (btrfs_must_commit_transaction(trans, BTRFS_I(inode)))
+			inode = BTRFS_I(d_inode(parent));
+			if (btrfs_must_commit_transaction(trans, inode))
 				ret = 1;
 			break;
 		}
@@ -5100,7 +5107,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 		parent = dget_parent(parent);
 		dput(old_parent);
 		old_parent = parent;
-		inode = d_inode(parent);
+		inode = BTRFS_I(d_inode(parent));
 
 	}
 	dput(old_parent);
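The loop in this function walks up the dentry chain, taking a reference on each new parent with dget_parent() and dropping the previous one with dput(). A minimal userspace model of that take-new-then-drop-old reference walk is sketched below; the node type, helpers and counters are invented for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *parent;
	int refcount;
};

static struct node *node_get_parent(struct node *n)   /* models dget_parent() */
{
	if (n->parent)
		n->parent->refcount++;
	return n->parent;
}

static void node_put(struct node *n)                  /* models dput() */
{
	if (n)
		n->refcount--;
}

int main(void)
{
	struct node root = { .parent = NULL, .refcount = 1 };
	struct node mid  = { .parent = &root, .refcount = 1 };
	struct node leaf = { .parent = &mid,  .refcount = 1 };

	struct node *parent = node_get_parent(&leaf);
	struct node *old_parent = NULL;

	/* Walk upward, always holding exactly one extra reference. */
	while (parent) {
		/* ...inspect the current ancestor here... */
		node_put(old_parent);
		old_parent = parent;
		parent = node_get_parent(parent);
	}
	node_put(old_parent);

	assert(root.refcount == 1 && mid.refcount == 1 && leaf.refcount == 1);
	printf("all references balanced\n");
	return 0;
}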
@@ -5287,15 +5294,15 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
 }
 
 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
-				 struct inode *inode,
+				 struct btrfs_inode *inode,
 				 struct btrfs_log_ctx *ctx)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_key key;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
-	const u64 ino = btrfs_ino(BTRFS_I(inode));
+	struct btrfs_root *root = inode->root;
+	const u64 ino = btrfs_ino(inode);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -5390,7 +5397,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
  * the last committed transaction
  */
 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root, struct inode *inode,
+				  struct btrfs_root *root,
+				  struct btrfs_inode *inode,
 				  struct dentry *parent,
 				  const loff_t start,
 				  const loff_t end,
@@ -5404,9 +5412,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 	int ret = 0;
 	u64 last_committed = fs_info->last_trans_committed;
 	bool log_dentries = false;
-	struct inode *orig_inode = inode;
+	struct btrfs_inode *orig_inode = inode;
 
-	sb = inode->i_sb;
+	sb = inode->vfs_inode.i_sb;
 
 	if (btrfs_test_opt(fs_info, NOTREELOG)) {
 		ret = 1;
@@ -5423,18 +5431,17 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		goto end_no_trans;
 	}
 
-	if (root != BTRFS_I(inode)->root ||
-	    btrfs_root_refs(&root->root_item) == 0) {
+	if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) {
 		ret = 1;
 		goto end_no_trans;
 	}
 
-	ret = check_parent_dirs_for_sync(trans, inode, parent,
-					 sb, last_committed);
+	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
+					 last_committed);
 	if (ret)
 		goto end_no_trans;
 
-	if (btrfs_inode_in_log(BTRFS_I(inode), trans->transid)) {
+	if (btrfs_inode_in_log(inode, trans->transid)) {
 		ret = BTRFS_NO_LOG_SYNC;
 		goto end_no_trans;
 	}
@@ -5443,8 +5450,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 	if (ret)
 		goto end_no_trans;
 
-	ret = btrfs_log_inode(trans, root, BTRFS_I(inode), inode_only,
-			      start, end, ctx);
+	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
 	if (ret)
 		goto end_trans;
 
@@ -5454,14 +5460,14 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 	 * we can use the last_unlink_trans field to record renames
 	 * and other fun in this file.
 	 */
-	if (S_ISREG(inode->i_mode) &&
-	    BTRFS_I(inode)->generation <= last_committed &&
-	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
+	if (S_ISREG(inode->vfs_inode.i_mode) &&
+	    inode->generation <= last_committed &&
+	    inode->last_unlink_trans <= last_committed) {
 		ret = 0;
 		goto end_trans;
 	}
 
-	if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
+	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
 		log_dentries = true;
 
 	/*
@@ -5505,7 +5511,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
 	 * and has a link count of 2.
 	 */
-	if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+	if (inode->last_unlink_trans > last_committed) {
 		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
 		if (ret)
 			goto end_trans;
@@ -5515,14 +5521,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			break;
 
-		inode = d_inode(parent);
-		if (root != BTRFS_I(inode)->root)
+		inode = BTRFS_I(d_inode(parent));
+		if (root != inode->root)
 			break;
 
-		if (BTRFS_I(inode)->generation > last_committed) {
-			ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
-					      LOG_INODE_EXISTS,
-					      0, LLONG_MAX, ctx);
+		if (inode->generation > last_committed) {
+			ret = btrfs_log_inode(trans, root, inode,
+					      LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
 			if (ret)
 				goto end_trans;
 		}
@@ -5534,7 +5539,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		old_parent = parent;
 	}
 	if (log_dentries)
-		ret = log_new_dir_dentries(trans, root, BTRFS_I(orig_inode), ctx);
+		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
 	else
 		ret = 0;
 end_trans:
@@ -5566,8 +5571,8 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
 	struct dentry *parent = dget_parent(dentry);
 	int ret;
 
-	ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
-				     start, end, 0, ctx);
+	ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
+				     parent, start, end, 0, ctx);
 	dput(parent);
 
 	return ret;
@@ -5829,7 +5834,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
 	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
 		return 0;
 
-	return btrfs_log_inode_parent(trans, root, &inode->vfs_inode, parent, 0,
+	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
 				      LLONG_MAX, 1, NULL);
 }
 
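Several of the tree-log hunks above gate work on transaction generations: an inode whose generation and last_unlink_trans are not newer than the last committed transaction needs no further logging. A small self-contained sketch of that generation comparison follows; the structure and field names are illustrative only and do not mirror the kernel layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_inode {
	uint64_t generation;        /* transaction that last changed the inode */
	uint64_t last_unlink_trans; /* transaction of the last unlink/rename   */
	bool     is_reg;
};

/* Nothing newer than the last committed transaction: skip logging. */
static bool needs_log(const struct toy_inode *inode, uint64_t last_committed)
{
	if (inode->is_reg &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed)
		return false;
	return true;
}

int main(void)
{
	struct toy_inode clean = { .generation = 90,  .last_unlink_trans = 80, .is_reg = true };
	struct toy_inode dirty = { .generation = 105, .last_unlink_trans = 80, .is_reg = true };

	printf("clean: %s\n", needs_log(&clean, 100) ? "log" : "skip");
	printf("dirty: %s\n", needs_log(&dirty, 100) ? "log" : "skip");
	return 0;
}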
@@ -1725,7 +1725,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
  * Function to update ctime/mtime for a given device path.
  * Mainly used for ctime/mtime based probe like libblkid.
  */
-static void update_dev_time(char *path_name)
+static void update_dev_time(const char *path_name)
 {
 	struct file *filp;
 
@@ -1851,7 +1851,8 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
 		fs_info->fs_devices->latest_bdev = next_device->bdev;
 }
 
-int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid)
+int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
+		    u64 devid)
 {
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *cur_devices;
@@ -2091,7 +2092,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 }
 
 static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
-				     char *device_path,
+				     const char *device_path,
 				     struct btrfs_device **device)
 {
 	int ret = 0;
@@ -2118,7 +2119,7 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
 }
 
 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
-					 char *device_path,
+					 const char *device_path,
 					 struct btrfs_device **device)
 {
 	*device = NULL;
@@ -2151,7 +2152,8 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
  * Lookup a device given by device id, or the path if the id is 0.
  */
 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
-				 char *devpath, struct btrfs_device **device)
+				 const char *devpath,
+				 struct btrfs_device **device)
 {
 	int ret;
 
@@ -2307,7 +2309,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
+int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
 {
 	struct btrfs_root *root = fs_info->dev_root;
 	struct request_queue *q;
@@ -2515,7 +2517,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
 }
 
 int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
-				  char *device_path,
+				  const char *device_path,
 				  struct btrfs_device *srcdev,
 				  struct btrfs_device **device_out)
 {
@@ -6954,7 +6956,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
 	key.offset = device->devid;
 
 	path = btrfs_alloc_path();
-	BUG_ON(!path);
+	if (!path)
+		return -ENOMEM;
 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
 	if (ret < 0) {
 		btrfs_warn_in_rcu(fs_info,
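This hunk replaces a BUG_ON() on allocation failure with a graceful -ENOMEM return, so an out-of-memory condition propagates as an error instead of crashing. A minimal userspace model of that pattern is shown below; the function name, buffer type and use of the libc allocator are stand-ins for the kernel equivalents.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct path_buf { unsigned char slots[64]; };

/* Return a negative errno on allocation failure instead of crashing. */
static int update_stat_item(void)
{
	struct path_buf *path = calloc(1, sizeof(*path));

	if (!path)
		return -ENOMEM;

	/* ...search the tree and update the item using 'path'... */

	free(path);
	return 0;
}

int main(void)
{
	int ret = update_stat_item();

	if (ret < 0)
		fprintf(stderr, "update failed: %d\n", ret);
	else
		printf("update succeeded\n");
	return ret < 0 ? 1 : 0;
}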
@@ -7102,7 +7105,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path)
+void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
 {
 	struct buffer_head *bh;
 	struct btrfs_super_block *disk_super;
@@ -422,16 +422,16 @@ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
 void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
 		struct btrfs_device *device, struct btrfs_device *this_dev);
 int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
-					 char *device_path,
+					 const char *device_path,
 					 struct btrfs_device **device);
 int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
-					 char *devpath,
+					 const char *devpath,
 					 struct btrfs_device **device);
 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
 					const u64 *devid,
 					const u8 *uuid);
 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
-		    char *device_path, u64 devid);
+		    const char *device_path, u64 devid);
 void btrfs_cleanup_fs_uuids(void);
 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
@@ -439,9 +439,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
 				       u8 *uuid, u8 *fsid);
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
-int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path);
+int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
 int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
-				  char *device_path,
+				  const char *device_path,
 				  struct btrfs_device *srcdev,
 				  struct btrfs_device **device_out);
 int btrfs_balance(struct btrfs_balance_control *bctl,
@@ -474,7 +474,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 				      struct btrfs_device *tgtdev);
 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
 					      struct btrfs_device *tgtdev);
-void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path);
+void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 			   u64 logical, u64 len, int mirror_num);
 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
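The device-management hunks above constify the device path arguments: the helpers only read the path, so taking const char * documents that and lets callers pass string literals or const buffers without casts. A tiny self-contained sketch of the same calling-convention change follows; the function, structure and field names are made up for the example.

#include <stdio.h>

struct toy_device { char name[32]; };

/*
 * Taking 'const char *' promises the helper will not modify the path,
 * so both literals and const buffers are accepted without casts.
 */
static int find_device_by_path(const char *device_path, struct toy_device *dev)
{
	if (!device_path || !*device_path)
		return -1;
	snprintf(dev->name, sizeof(dev->name), "%s", device_path);
	return 0;
}

int main(void)
{
	const char *path = "/dev/sdb";     /* a const buffer is fine now */
	struct toy_device dev;

	if (find_device_by_path(path, &dev) == 0)
		printf("found %s\n", dev.name);
	if (find_device_by_path("/dev/sdc", &dev) == 0)   /* literal is fine too */
		printf("found %s\n", dev.name);
	return 0;
}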
@@ -73,13 +73,11 @@ static struct list_head *zlib_alloc_workspace(void)
 
 static int zlib_compress_pages(struct list_head *ws,
 			       struct address_space *mapping,
-			       u64 start, unsigned long len,
+			       u64 start,
 			       struct page **pages,
-			       unsigned long nr_dest_pages,
 			       unsigned long *out_pages,
 			       unsigned long *total_in,
-			       unsigned long *total_out,
-			       unsigned long max_out)
+			       unsigned long *total_out)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret;
@@ -89,6 +87,9 @@ static int zlib_compress_pages(struct list_head *ws,
 	struct page *in_page = NULL;
 	struct page *out_page = NULL;
 	unsigned long bytes_left;
+	unsigned long len = *total_out;
+	unsigned long nr_dest_pages = *out_pages;
+	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
 
 	*out_pages = 0;
 	*total_out = 0;
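The zlib hunks merge what used to be separate input and output parameters: the caller now passes the input length in *total_out and the destination page budget in *out_pages, the implementation derives its own output limit from them, and the same pointers are overwritten with the results. A compact userspace sketch of that merged in/out convention is below; the buffer copy stands in for compression and the 4096-byte page size is an assumption of the example, not taken from the patch.

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096UL

/*
 * total_out:  in = number of input bytes,        out = bytes produced.
 * out_chunks: in = destination budget in pages,  out = pages actually used.
 */
static int toy_compress(const unsigned char *src, unsigned char *dst,
			unsigned long *total_out, unsigned long *out_chunks)
{
	unsigned long len = *total_out;                      /* input length  */
	unsigned long max_out = *out_chunks * TOY_PAGE_SIZE; /* derived limit */
	unsigned long produced = len < max_out ? len : max_out;

	memcpy(dst, src, produced);        /* stand-in for the real compression */

	*total_out = produced;
	*out_chunks = (produced + TOY_PAGE_SIZE - 1) / TOY_PAGE_SIZE;
	return 0;
}

int main(void)
{
	unsigned char src[8192] = { 0 }, dst[8192];
	unsigned long total_out = sizeof(src);   /* in: input length */
	unsigned long out_chunks = 2;            /* in: page budget  */

	toy_compress(src, dst, &total_out, &out_chunks);
	printf("produced %lu bytes in %lu pages\n", total_out, out_chunks);
	return 0;
}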