ext4: pass allocation_request struct to ext4_(alloc,splice)_branch

Instead of initializing the allocation_request structure in
ext4_alloc_branch(), set it up in ext4_ind_map_blocks(), and then pass
it to ext4_alloc_branch() and ext4_splice_branch().

This allows ext4_ind_map_blocks() to pass flags in the allocation
request structure without having to add Yet Another argument to
ext4_alloc_branch().

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
Author: Theodore Ts'o
Date:   2014-09-04 18:06:25 -04:00
Commit: a521100231 (parent eb68d0e2fc)

1 changed file with 38 additions and 44 deletions
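
The change is a classic parameter-object refactoring: rather than growing a
function's argument list one knob at a time, the caller fills in a request
struct once and passes a pointer down the call chain. As a rough standalone
sketch of the pattern (not the ext4 code itself; the struct and function
names below are invented for illustration, with fields mirroring the
ext4_allocation_request fields this patch touches):

    #include <stdio.h>
    #include <string.h>

    /* Bundles what used to be separate arguments, like
     * struct ext4_allocation_request does in the patch. */
    struct alloc_request {
            unsigned long logical;  /* logical block to map */
            unsigned long goal;     /* preferred physical block */
            unsigned int len;       /* number of blocks wanted */
            unsigned int flags;     /* allocation hints */
    };

    /* Before: every new knob means Yet Another argument. */
    static int alloc_branch_old(unsigned long logical, unsigned long goal,
                                unsigned int *len, unsigned int flags)
    {
            printf("old: logical=%lu goal=%lu len=%u flags=%#x\n",
                   logical, goal, *len, flags);
            return 0;
    }

    /* After: one pointer carries them all; adding a field later
     * costs no signature change anywhere in the call chain. */
    static int alloc_branch_new(struct alloc_request *ar)
    {
            printf("new: logical=%lu goal=%lu len=%u flags=%#x\n",
                   ar->logical, ar->goal, ar->len, ar->flags);
            return 0;
    }

    int main(void)
    {
            struct alloc_request ar;
            unsigned int len = 8;

            alloc_branch_old(100, 5000, &len, 0x1);

            /* The caller sets the request up once, as
             * ext4_ind_map_blocks() now does, and callees read
             * and update it in place. */
            memset(&ar, 0, sizeof(ar));
            ar.logical = 100;
            ar.goal = 5000;
            ar.len = 8;
            ar.flags = 0x1;
            return alloc_branch_new(&ar);
    }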


--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -318,34 +318,22 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  *	as described above and return 0.
  */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-                             ext4_lblk_t iblock, int indirect_blks,
-                             int *blks, ext4_fsblk_t goal,
-                             ext4_lblk_t *offsets, Indirect *branch)
+static int ext4_alloc_branch(handle_t *handle,
+                             struct ext4_allocation_request *ar,
+                             int indirect_blks, ext4_lblk_t *offsets,
+                             Indirect *branch)
 {
-        struct ext4_allocation_request  ar;
         struct buffer_head *            bh;
         ext4_fsblk_t                    b, new_blocks[4];
         __le32                          *p;
         int                             i, j, err, len = 1;
 
-        /*
-         * Set up for the direct block allocation
-         */
-        memset(&ar, 0, sizeof(ar));
-        ar.inode = inode;
-        ar.len = *blks;
-        ar.logical = iblock;
-        if (S_ISREG(inode->i_mode))
-                ar.flags = EXT4_MB_HINT_DATA;
-
         for (i = 0; i <= indirect_blks; i++) {
                 if (i == indirect_blks) {
-                        ar.goal = goal;
-                        new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
+                        new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
                 } else
-                        goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
-                                                        goal, 0, NULL, &err);
+                        ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
+                                        ar->inode, ar->goal, 0, NULL, &err);
                 if (err) {
                         i--;
                         goto failed;
@@ -354,7 +342,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                 if (i == 0)
                         continue;
 
-                bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
+                bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
                 if (unlikely(!bh)) {
                         err = -ENOMEM;
                         goto failed;
@@ -372,7 +360,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                 b = new_blocks[i];
 
                 if (i == indirect_blks)
-                        len = ar.len;
+                        len = ar->len;
                 for (j = 0; j < len; j++)
                         *p++ = cpu_to_le32(b++);
 
@@ -381,11 +369,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                 unlock_buffer(bh);
 
                 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-                err = ext4_handle_dirty_metadata(handle, inode, bh);
+                err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
                 if (err)
                         goto failed;
         }
-        *blks = ar.len;
         return 0;
 failed:
         for (; i >= 0; i--) {
@@ -396,10 +383,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                  * existing before ext4_alloc_branch() was called.
                  */
                 if (i > 0 && i != indirect_blks && branch[i].bh)
-                        ext4_forget(handle, 1, inode, branch[i].bh,
+                        ext4_forget(handle, 1, ar->inode, branch[i].bh,
                                     branch[i].bh->b_blocknr);
-                ext4_free_blocks(handle, inode, NULL, new_blocks[i],
-                                 (i == indirect_blks) ? ar.len : 1, 0);
+                ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+                                 (i == indirect_blks) ? ar->len : 1, 0);
         }
         return err;
 }
@@ -419,9 +406,9 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
  * inode (->i_blocks, etc.). In case of success we end up with the full
  * chain to new block and return 0.
  */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-                              ext4_lblk_t block, Indirect *where, int num,
-                              int blks)
+static int ext4_splice_branch(handle_t *handle,
+                              struct ext4_allocation_request *ar,
+                              Indirect *where, int num)
 {
         int i;
         int err = 0;
@@ -446,9 +433,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
          * Update the host buffer_head or inode to point to more just allocated
          * direct blocks blocks
          */
-        if (num == 0 && blks > 1) {
+        if (num == 0 && ar->len > 1) {
                 current_block = le32_to_cpu(where->key) + 1;
-                for (i = 1; i < blks; i++)
+                for (i = 1; i < ar->len; i++)
                         *(where->p + i) = cpu_to_le32(current_block++);
         }
 
@@ -465,14 +452,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                  */
                 jbd_debug(5, "splicing indirect only\n");
                 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
-                err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+                err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
                 if (err)
                         goto err_out;
         } else {
                 /*
                  * OK, we spliced it into the inode itself on a direct block.
                  */
-                ext4_mark_inode_dirty(handle, inode);
+                ext4_mark_inode_dirty(handle, ar->inode);
                 jbd_debug(5, "splicing direct\n");
         }
         return err;
@@ -484,11 +471,11 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                  * need to revoke the block, which is why we don't
                  * need to set EXT4_FREE_BLOCKS_METADATA.
                  */
-                ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+                ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
                                  EXT4_FREE_BLOCKS_FORGET);
         }
-        ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
-                         blks, 0);
+        ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
+                         ar->len, 0);
 
         return err;
 }
@@ -525,11 +512,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                         struct ext4_map_blocks *map,
                         int flags)
 {
+        struct ext4_allocation_request  ar;
         int err = -EIO;
         ext4_lblk_t offsets[4];
         Indirect chain[4];
         Indirect *partial;
-        ext4_fsblk_t goal;
         int indirect_blks;
         int blocks_to_boundary = 0;
         int depth;
@@ -579,7 +566,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                 return -ENOSPC;
         }
 
-        goal = ext4_find_goal(inode, map->m_lblk, partial);
+        /* Set up for the direct block allocation */
+        memset(&ar, 0, sizeof(ar));
+        ar.inode = inode;
+        ar.logical = map->m_lblk;
+        if (S_ISREG(inode->i_mode))
+                ar.flags = EXT4_MB_HINT_DATA;
+
+        ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
 
         /* the number of blocks need to allocate for [d,t]indirect blocks */
         indirect_blks = (chain + depth) - partial - 1;
@@ -588,13 +582,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
          * Next look up the indirect map to count the totoal number of
          * direct blocks to allocate for this branch.
          */
-        count = ext4_blks_to_allocate(partial, indirect_blks,
-                                      map->m_len, blocks_to_boundary);
+        ar.len = ext4_blks_to_allocate(partial, indirect_blks,
+                                       map->m_len, blocks_to_boundary);
 
         /*
          * Block out ext4_truncate while we alter the tree
          */
-        err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
-                                &count, goal,
-                                offsets + (partial - chain), partial);
+        err = ext4_alloc_branch(handle, &ar, indirect_blks,
+                                offsets + (partial - chain), partial);
 
         /*
@@ -605,14 +599,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
          * may need to return -EAGAIN upwards in the worst case. --sct
          */
         if (!err)
-                err = ext4_splice_branch(handle, inode, map->m_lblk,
-                                         partial, indirect_blks, count);
+                err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
         if (err)
                 goto cleanup;
 
         map->m_flags |= EXT4_MAP_NEW;
 
         ext4_update_inode_fsync_trans(handle, inode, 1);
+        count = ar.len;
 got_it:
         map->m_flags |= EXT4_MAP_MAPPED;
         map->m_pblk = le32_to_cpu(chain[depth-1].key);
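
The payoff the commit message describes looks like this in practice: with ar
in scope for the whole of ext4_ind_map_blocks(), a mapping flag can be
forwarded to the allocator by setting it in ar.flags before calling
ext4_alloc_branch(), with no signature change anywhere below. A hypothetical
fragment modeled on the equivalent code in ext4_ext_map_blocks() (both
constants are real ext4 flags, but this hunk is illustrative and not part of
this commit):

        /* Hypothetical: forward a get_blocks flag as an mballoc hint by
         * setting it in the request struct -- no new parameter needed. */
        if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
                ar.flags |= EXT4_MB_USE_RESERVED;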