gfs2: gfs2_iomap_begin cleanup

Following commit d0a22a4b03 ("gfs2: Fix iomap write page reclaim deadlock"),
gfs2_iomap_begin and gfs2_iomap_begin_write can be further cleaned up and the
split between those two functions can be improved.

With suggestions from Christoph Hellwig.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Reviewed-by: Bob Peterson <rpeterso@redhat.com>
This commit is contained in:
Andreas Gruenbacher 2019-07-05 17:38:35 +02:00
parent a27a0c9b6a
commit 34aad20bc3
1 changed file with 63 additions and 53 deletions

View File

@@ -1065,40 +1065,28 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
{ {
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks; bool unstuff;
bool unstuff, alloc_required;
int ret; int ret;
ret = gfs2_write_lock(inode);
if (ret)
return ret;
unstuff = gfs2_is_stuffed(ip) && unstuff = gfs2_is_stuffed(ip) &&
pos + length > gfs2_max_stuffed_size(ip); pos + length > gfs2_max_stuffed_size(ip);
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp); if (unstuff || iomap->type == IOMAP_HOLE) {
if (ret) unsigned int data_blocks, ind_blocks;
goto out_unlock; struct gfs2_alloc_parms ap = {};
unsigned int rblocks;
struct gfs2_trans *tr;
alloc_required = unstuff || iomap->type == IOMAP_HOLE;
if (alloc_required || gfs2_is_jdata(ip))
gfs2_write_calc_reserv(ip, iomap->length, &data_blocks, gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
&ind_blocks); &ind_blocks);
ap.target = data_blocks + ind_blocks;
if (alloc_required) {
struct gfs2_alloc_parms ap = {
.target = data_blocks + ind_blocks
};
ret = gfs2_quota_lock_check(ip, &ap); ret = gfs2_quota_lock_check(ip, &ap);
if (ret) if (ret)
goto out_unlock; return ret;
ret = gfs2_inplace_reserve(ip, &ap); ret = gfs2_inplace_reserve(ip, &ap);
if (ret) if (ret)
goto out_qunlock; goto out_qunlock;
}
rblocks = RES_DINODE + ind_blocks; rblocks = RES_DINODE + ind_blocks;
if (gfs2_is_jdata(ip)) if (gfs2_is_jdata(ip))
@@ -1107,12 +1095,8 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
rblocks += RES_STATFS + RES_QUOTA; rblocks += RES_STATFS + RES_QUOTA;
if (inode == sdp->sd_rindex) if (inode == sdp->sd_rindex)
rblocks += 2 * RES_STATFS; rblocks += 2 * RES_STATFS;
if (alloc_required)
rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
if (unstuff || iomap->type == IOMAP_HOLE) {
struct gfs2_trans *tr;
ret = gfs2_trans_begin(sdp, rblocks, ret = gfs2_trans_begin(sdp, rblocks,
iomap->length >> inode->i_blkbits); iomap->length >> inode->i_blkbits);
if (ret) if (ret)
@@ -1153,16 +1137,17 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
out_trans_end: out_trans_end:
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
out_trans_fail: out_trans_fail:
if (alloc_required)
gfs2_inplace_release(ip); gfs2_inplace_release(ip);
out_qunlock: out_qunlock:
if (alloc_required)
gfs2_quota_unlock(ip); gfs2_quota_unlock(ip);
out_unlock:
gfs2_write_unlock(inode);
return ret; return ret;
} }
/*
 * gfs2_iomap_need_write_lock - decide whether this iomap operation must
 * hold the inode write lock.
 *
 * True only for buffered writes (IOMAP_WRITE without IOMAP_DIRECT):
 * gfs2_iomap_begin takes gfs2_write_lock() when this returns true, and
 * gfs2_iomap_begin (on error) / gfs2_iomap_end drop it under the same
 * condition, keeping lock and unlock paths symmetric.
 */
static inline bool gfs2_iomap_need_write_lock(unsigned flags)
{
return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
}
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, struct iomap *iomap) unsigned flags, struct iomap *iomap)
{ {
@@ -1173,20 +1158,39 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
iomap->flags |= IOMAP_F_BUFFER_HEAD; iomap->flags |= IOMAP_F_BUFFER_HEAD;
trace_gfs2_iomap_start(ip, pos, length, flags); trace_gfs2_iomap_start(ip, pos, length, flags);
if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) { if (gfs2_iomap_need_write_lock(flags)) {
ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp); ret = gfs2_write_lock(inode);
} else { if (ret)
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); goto out;
/*
* Silently fall back to buffered I/O for stuffed files or if
* we've hot a hole (see gfs2_file_direct_write).
*/
if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
iomap->type != IOMAP_MAPPED)
ret = -ENOTBLK;
} }
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
if (ret)
goto out_unlock;
switch(flags & IOMAP_WRITE) {
case IOMAP_WRITE:
if (flags & IOMAP_DIRECT) {
/*
* Silently fall back to buffered I/O for stuffed files
* or if we've got a hole (see gfs2_file_direct_write).
*/
if (iomap->type != IOMAP_MAPPED)
ret = -ENOTBLK;
goto out_unlock;
}
break;
default:
goto out_unlock;
}
ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
out_unlock:
if (ret && gfs2_iomap_need_write_lock(flags))
gfs2_write_unlock(inode);
release_metapath(&mp); release_metapath(&mp);
out:
trace_gfs2_iomap_end(ip, iomap, ret); trace_gfs2_iomap_end(ip, iomap, ret);
return ret; return ret;
} }
@@ -1197,8 +1201,14 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_sbd *sdp = GFS2_SB(inode);
if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE) switch (flags & IOMAP_WRITE) {
goto out; case IOMAP_WRITE:
if (flags & IOMAP_DIRECT)
return 0;
break;
default:
return 0;
}
if (!gfs2_is_stuffed(ip)) if (!gfs2_is_stuffed(ip))
gfs2_ordered_add_inode(ip); gfs2_ordered_add_inode(ip);
@@ -1231,8 +1241,8 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
out_unlock: out_unlock:
if (gfs2_iomap_need_write_lock(flags))
gfs2_write_unlock(inode); gfs2_write_unlock(inode);
out:
return 0; return 0;
} }