xfs: use xfs_bmap_del_extent_delay for the data fork as well

And remove the delalloc code from xfs_bmap_del_extent, which gets renamed
to xfs_bmap_del_extent_real to fit the naming scheme used by the other
xfs_bmap_{add,del}_extent_* routines.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
This commit is contained in:
Christoph Hellwig 2017-10-17 14:16:21 -07:00 committed by Darrick J. Wong
parent 8280f6ed46
commit e1d7553faf
1 changed file with 113 additions and 216 deletions

View File

@ -5059,10 +5059,10 @@ xfs_bmap_del_extent_cow(
/* /*
* Called by xfs_bmapi to update file extent records and the btree * Called by xfs_bmapi to update file extent records and the btree
* after removing space (or undoing a delayed allocation). * after removing space.
*/ */
STATIC int /* error */ STATIC int /* error */
xfs_bmap_del_extent( xfs_bmap_del_extent_real(
xfs_inode_t *ip, /* incore inode pointer */ xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */ xfs_trans_t *tp, /* current transaction pointer */
xfs_extnum_t *idx, /* extent number to update/delete */ xfs_extnum_t *idx, /* extent number to update/delete */
@ -5073,11 +5073,8 @@ xfs_bmap_del_extent(
int whichfork, /* data or attr fork */ int whichfork, /* data or attr fork */
int bflags) /* bmapi flags */ int bflags) /* bmapi flags */
{ {
xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
xfs_fsblock_t del_endblock=0; /* first block past del */ xfs_fsblock_t del_endblock=0; /* first block past del */
xfs_fileoff_t del_endoff; /* first offset past del */ xfs_fileoff_t del_endoff; /* first offset past del */
int delay; /* current block is delayed allocated */
int do_fx; /* free extent at end of routine */ int do_fx; /* free extent at end of routine */
xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */ xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
int error; /* error return value */ int error; /* error return value */
@ -5112,63 +5109,40 @@ xfs_bmap_del_extent(
del_endoff = del->br_startoff + del->br_blockcount; del_endoff = del->br_startoff + del->br_blockcount;
got_endoff = got.br_startoff + got.br_blockcount; got_endoff = got.br_startoff + got.br_blockcount;
ASSERT(got_endoff >= del_endoff); ASSERT(got_endoff >= del_endoff);
delay = isnullstartblock(got.br_startblock); ASSERT(!isnullstartblock(got.br_startblock));
ASSERT(isnullstartblock(del->br_startblock) == delay); flags = XFS_ILOG_CORE;
flags = 0;
qfield = 0; qfield = 0;
error = 0; error = 0;
/*
* If deleting a real allocation, must free up the disk space.
*/
if (!delay) {
flags = XFS_ILOG_CORE;
/*
* Realtime allocation. Free it and record di_nblocks update.
*/
if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
xfs_fsblock_t bno;
xfs_filblks_t len;
ASSERT(do_mod(del->br_blockcount, if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
mp->m_sb.sb_rextsize) == 0); xfs_fsblock_t bno;
ASSERT(do_mod(del->br_startblock, xfs_filblks_t len;
mp->m_sb.sb_rextsize) == 0);
bno = del->br_startblock; ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
len = del->br_blockcount; ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
do_div(bno, mp->m_sb.sb_rextsize); bno = del->br_startblock;
do_div(len, mp->m_sb.sb_rextsize); len = del->br_blockcount;
error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); do_div(bno, mp->m_sb.sb_rextsize);
if (error) do_div(len, mp->m_sb.sb_rextsize);
goto done; error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
do_fx = 0; if (error)
nblks = len * mp->m_sb.sb_rextsize; goto done;
qfield = XFS_TRANS_DQ_RTBCOUNT;
}
/*
* Ordinary allocation.
*/
else {
do_fx = 1;
nblks = del->br_blockcount;
qfield = XFS_TRANS_DQ_BCOUNT;
}
/*
* Set up del_endblock and cur for later.
*/
del_endblock = del->br_startblock + del->br_blockcount;
if (cur) {
if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
got.br_startblock, got.br_blockcount,
&i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
da_old = da_new = 0;
} else {
da_old = startblockval(got.br_startblock);
da_new = 0;
nblks = 0;
do_fx = 0; do_fx = 0;
nblks = len * mp->m_sb.sb_rextsize;
qfield = XFS_TRANS_DQ_RTBCOUNT;
} else {
do_fx = 1;
nblks = del->br_blockcount;
qfield = XFS_TRANS_DQ_BCOUNT;
}
del_endblock = del->br_startblock + del->br_blockcount;
if (cur) {
error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
got.br_startblock, got.br_blockcount, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
} }
/* /*
@ -5185,8 +5159,6 @@ xfs_bmap_del_extent(
xfs_iext_remove(ip, *idx, 1, xfs_iext_remove(ip, *idx, 1,
whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
--*idx; --*idx;
if (delay)
break;
XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1); XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@ -5208,14 +5180,6 @@ xfs_bmap_del_extent(
xfs_bmbt_set_startoff(ep, del_endoff); xfs_bmbt_set_startoff(ep, del_endoff);
temp = got.br_blockcount - del->br_blockcount; temp = got.br_blockcount - del->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp); xfs_bmbt_set_blockcount(ep, temp);
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
da_new = temp;
break;
}
xfs_bmbt_set_startblock(ep, del_endblock); xfs_bmbt_set_startblock(ep, del_endblock);
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
if (!cur) { if (!cur) {
@ -5235,14 +5199,6 @@ xfs_bmap_del_extent(
temp = got.br_blockcount - del->br_blockcount; temp = got.br_blockcount - del->br_blockcount;
trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp); xfs_bmbt_set_blockcount(ep, temp);
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
da_new = temp;
break;
}
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
if (!cur) { if (!cur) {
flags |= xfs_ilog_fext(whichfork); flags |= xfs_ilog_fext(whichfork);
@ -5266,89 +5222,60 @@ xfs_bmap_del_extent(
temp2 = got_endoff - del_endoff; temp2 = got_endoff - del_endoff;
new.br_blockcount = temp2; new.br_blockcount = temp2;
new.br_state = got.br_state; new.br_state = got.br_state;
if (!delay) { new.br_startblock = del_endblock;
new.br_startblock = del_endblock; flags |= XFS_ILOG_CORE;
flags |= XFS_ILOG_CORE; if (cur) {
if (cur) { error = xfs_bmbt_update(cur, got.br_startoff,
if ((error = xfs_bmbt_update(cur, got.br_startblock, temp,
got.br_startoff, got.br_state);
got.br_startblock, temp, if (error)
got.br_state))) goto done;
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
cur->bc_rec.b = new;
error = xfs_btree_insert(cur, &i);
if (error && error != -ENOSPC)
goto done;
/*
* If get no-space back from btree insert, it tried a
* split, and we have a zero block reservation. Fix up
* our state and return the error.
*/
if (error == -ENOSPC) {
/*
* Reset the cursor, don't trust it after any
* insert operation.
*/
error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
got.br_startblock, temp, &i);
if (error)
goto done; goto done;
if ((error = xfs_btree_increment(cur, 0, &i))) XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
goto done; /*
cur->bc_rec.b = new; * Update the btree record back
error = xfs_btree_insert(cur, &i); * to the original value.
if (error && error != -ENOSPC) */
error = xfs_bmbt_update(cur, got.br_startoff,
got.br_startblock,
got.br_blockcount,
got.br_state);
if (error)
goto done; goto done;
/* /*
* If get no-space back from btree insert, * Reset the extent record back
* it tried a split, and we have a zero * to the original value.
* block reservation.
* Fix up our state and return the error.
*/ */
if (error == -ENOSPC) { xfs_bmbt_set_blockcount(ep, got.br_blockcount);
/* flags = 0;
* Reset the cursor, don't trust error = -ENOSPC;
* it after any insert operation. goto done;
*/ }
if ((error = xfs_bmbt_lookup_eq(cur, XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
got.br_startoff, } else
got.br_startblock, flags |= xfs_ilog_fext(whichfork);
temp, &i))) XFS_IFORK_NEXT_SET(ip, whichfork,
goto done; XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
XFS_WANT_CORRUPTED_GOTO(mp,
i == 1, done);
/*
* Update the btree record back
* to the original value.
*/
if ((error = xfs_bmbt_update(cur,
got.br_startoff,
got.br_startblock,
got.br_blockcount,
got.br_state)))
goto done;
/*
* Reset the extent record back
* to the original value.
*/
xfs_bmbt_set_blockcount(ep,
got.br_blockcount);
flags = 0;
error = -ENOSPC;
goto done;
}
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
} else
flags |= xfs_ilog_fext(whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
} else {
xfs_filblks_t stolen;
ASSERT(whichfork == XFS_DATA_FORK);
/*
* Distribute the original indlen reservation across the
* two new extents. Steal blocks from the deleted extent
* if necessary. Stealing blocks simply fudges the
* fdblocks accounting in xfs_bunmapi().
*/
temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
del->br_blockcount);
da_new = temp + temp2 - stolen;
del->br_blockcount -= stolen;
/*
* Set the reservation for each extent. Warn if either
* is zero as this can lead to delalloc problems.
*/
WARN_ON_ONCE(!temp || !temp2);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
new.br_startblock = nullstartblock((int)temp2);
}
trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
xfs_iext_insert(ip, *idx + 1, 1, &new, state); xfs_iext_insert(ip, *idx + 1, 1, &new, state);
++*idx; ++*idx;
@ -5356,11 +5283,9 @@ xfs_bmap_del_extent(
} }
/* remove reverse mapping */ /* remove reverse mapping */
if (!delay) { error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del); if (error)
if (error) goto done;
goto done;
}
/* /*
* If we need to, add to list of extents to delete. * If we need to, add to list of extents to delete.
@ -5386,13 +5311,6 @@ xfs_bmap_del_extent(
if (qfield && !(bflags & XFS_BMAPI_REMAP)) if (qfield && !(bflags & XFS_BMAPI_REMAP))
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
/*
* Account for change in delayed indirect blocks.
* Nothing to do for disk quota accounting here.
*/
ASSERT(da_old >= da_new);
if (da_old > da_new)
xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done: done:
*logflagsp = flags; *logflagsp = flags;
return error; return error;
@ -5677,62 +5595,41 @@ __xfs_bunmapi(
} }
} }
/*
* If it's the case where the directory code is running
* with no block reservation, and the deleted block is in
* the middle of its extent, and the resulting insert
* of an extent would cause transformation to btree format,
* then reject it. The calling code will then swap
* blocks around instead.
* We have to do this now, rather than waiting for the
* conversion to btree format, since the transaction
* will be dirty.
*/
if (!wasdel && tp->t_blk_res == 0 &&
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
XFS_IFORK_MAXEXT(ip, whichfork) &&
del.br_startoff > got.br_startoff &&
del.br_startoff + del.br_blockcount <
got.br_startoff + got.br_blockcount) {
error = -ENOSPC;
goto error0;
}
/*
* Unreserve quota and update realtime free space, if
* appropriate. If delayed allocation, update the inode delalloc
* counter now and wait to update the sb counters as
* xfs_bmap_del_extent() might need to borrow some blocks.
*/
if (wasdel) { if (wasdel) {
ASSERT(startblockval(del.br_startblock) > 0); error = xfs_bmap_del_extent_delay(ip, whichfork, &lastx,
if (isrt) { &got, &del);
xfs_filblks_t rtexts; } else {
/*
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); * If it's the case where the directory code is running
do_div(rtexts, mp->m_sb.sb_rextsize); * with no block reservation, and the deleted block is
xfs_mod_frextents(mp, (int64_t)rtexts); * in the middle of its extent, and the resulting insert
(void)xfs_trans_reserve_quota_nblks(NULL, * of an extent would cause transformation to btree
ip, -((long)del.br_blockcount), 0, * format, then reject it. The calling code will then
XFS_QMOPT_RES_RTBLKS); * swap blocks around instead. We have to do this now,
} else { * rather than waiting for the conversion to btree
(void)xfs_trans_reserve_quota_nblks(NULL, * format, since the transaction will be dirty.
ip, -((long)del.br_blockcount), 0, */
XFS_QMOPT_RES_REGBLKS); if (tp->t_blk_res == 0 &&
XFS_IFORK_FORMAT(ip, whichfork) ==
XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_NEXTENTS(ip, whichfork) >=
XFS_IFORK_MAXEXT(ip, whichfork) &&
del.br_startoff > got.br_startoff &&
del.br_startoff + del.br_blockcount <
got.br_startoff + got.br_blockcount) {
error = -ENOSPC;
goto error0;
} }
ip->i_delayed_blks -= del.br_blockcount;
error = xfs_bmap_del_extent_real(ip, tp, &lastx, dfops,
cur, &del, &tmp_logflags, whichfork,
flags);
logflags |= tmp_logflags;
} }
error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
&tmp_logflags, whichfork, flags);
logflags |= tmp_logflags;
if (error) if (error)
goto error0; goto error0;
if (!isrt && wasdel)
xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
max_len -= del.br_blockcount; max_len -= del.br_blockcount;
end = del.br_startoff - 1; end = del.br_startoff - 1;
nodelete: nodelete: