xfs: don't retry xfs_buf_find on XBF_TRYLOCK failure

When looking at an event trace recently, I noticed that non-blocking
buffer lookup attempts would fail on cached locked buffers and then
run the slow cache-miss path. This means we are doing an xfs_buf
allocation, lookup and free unnecessarily every time we avoid
blocking on a locked buffer.
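
As a concrete illustration, the pre-patch lookup pattern in
xfs_buf_get_map() looked roughly like this (condensed from the diff
below, not verbatim source); the NULL return is ambiguous, so a
trylock failure takes the same slow path as a real cache miss:

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	/*
	 * We also land here when the buffer is cached but locked and
	 * XBF_TRYLOCK made us back off: we allocate a new buffer,
	 * repeat the lookup, find the cached buffer again and then
	 * free the allocation we never needed.
	 */
	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);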

Fix this by changing _xfs_buf_find() to return an error status to
the caller to indicate that we failed the lock attempt rather than
just returning NULL. This allows the higher level code to
discriminate between a cache miss and a cache hit that we failed to
lock.
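
Condensed from the xfs_buf_get_map() hunk below, the new contract
lets the caller tell the cases apart:

	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
	switch (error) {
	case 0:		/* cache hit: bp is referenced and locked */
		goto found;
	case -EAGAIN:	/* cache hit, but the trylock failed */
		return NULL;
	case -ENOENT:	/* real cache miss: go allocate and insert */
		break;
	}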

This also allows us to return a -EFSCORRUPTED state if we are asked
to look up a block number outside the range of the filesystem in
_xfs_buf_find(), which moves us one step closer to being able to
handle such errors in a more graceful manner at the higher levels.
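
For reference, the bounds check in question (shown in full in the
diff) now reduces to:

	/* the daddr must lie inside the filesystem, i.e. in [0, eofs) */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs)
		return -EFSCORRUPTED;	/* was: return NULL */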

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
commit b027d4c97b (parent 8925a3dc47)
Dave Chinner, 2018-04-18 08:25:21 -07:00; committed by Darrick J. Wong
1 changed file with 65 additions and 28 deletions


--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -549,17 +549,31 @@ xfs_buf_hash_destroy(
 }
 
 /*
- * Look up (and insert if absent), a lockable buffer for a given
- * range of an inode. The buffer is returned locked. No I/O is
- * implied by this call.
+ * Look up a buffer in the buffer cache and return it referenced and locked
+ * in @found_bp.
+ *
+ * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
+ * cache.
+ *
+ * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
+ * -EAGAIN if we fail to lock it.
+ *
+ * Return values are:
+ *	-EFSCORRUPTED if we have been supplied with an invalid address
+ *	-EAGAIN on trylock failure
+ *	-ENOENT if we fail to find a match and @new_bp was NULL
+ *	0, with @found_bp:
+ *		- @new_bp if we inserted it into the cache
+ *		- the buffer we found and locked.
  */
-static struct xfs_buf *
-_xfs_buf_find(
+static int
+xfs_buf_find(
 	struct xfs_buftarg	*btp,
 	struct xfs_buf_map	*map,
 	int			nmaps,
 	xfs_buf_flags_t		flags,
-	struct xfs_buf		*new_bp)
+	struct xfs_buf		*new_bp,
+	struct xfs_buf		**found_bp)
 {
 	struct xfs_perag	*pag;
 	xfs_buf_t		*bp;
@@ -567,6 +581,8 @@ _xfs_buf_find(
 	xfs_daddr_t		eofs;
 	int			i;
 
+	*found_bp = NULL;
+
 	for (i = 0; i < nmaps; i++)
 		cmap.bm_len += map[i].bm_len;
@@ -580,16 +596,11 @@ _xfs_buf_find(
 	 */
 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
 	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
-		/*
-		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
-		 * but none of the higher level infrastructure supports
-		 * returning a specific error on buffer lookup failures.
-		 */
 		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, cmap.bm_bn, eofs);
 		WARN_ON(1);
-		return NULL;
+		return -EFSCORRUPTED;
 	}
 
 	pag = xfs_perag_get(btp->bt_mount,
@@ -604,19 +615,20 @@ _xfs_buf_find(
 	}
 
 	/* No match found */
-	if (new_bp) {
-		/* the buffer keeps the perag reference until it is freed */
-		new_bp->b_pag = pag;
-		rhashtable_insert_fast(&pag->pag_buf_hash,
-				       &new_bp->b_rhash_head,
-				       xfs_buf_hash_params);
-		spin_unlock(&pag->pag_buf_lock);
-	} else {
+	if (!new_bp) {
 		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
 		spin_unlock(&pag->pag_buf_lock);
 		xfs_perag_put(pag);
+		return -ENOENT;
 	}
-	return new_bp;
+
+	/* the buffer keeps the perag reference until it is freed */
+	new_bp->b_pag = pag;
+	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
+			       xfs_buf_hash_params);
+	spin_unlock(&pag->pag_buf_lock);
+	*found_bp = new_bp;
+	return 0;
 
 found:
 	spin_unlock(&pag->pag_buf_lock);
@@ -626,7 +638,7 @@ _xfs_buf_find(
 		if (flags & XBF_TRYLOCK) {
 			xfs_buf_rele(bp);
 			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
-			return NULL;
+			return -EAGAIN;
 		}
 		xfs_buf_lock(bp);
 		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
@@ -646,7 +658,8 @@ _xfs_buf_find(
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
 	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
-	return bp;
+	*found_bp = bp;
+	return 0;
 }
 
 struct xfs_buf *
@@ -656,8 +669,14 @@ xfs_buf_incore(
 	size_t			numblks,
 	xfs_buf_flags_t		flags)
 {
+	struct xfs_buf		*bp;
+	int			error;
 	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-	return _xfs_buf_find(target, &map, 1, flags, NULL);
+
+	error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
+	if (error)
+		return NULL;
+	return bp;
 }
 
 /*
@@ -676,9 +695,27 @@ xfs_buf_get_map(
 	struct xfs_buf		*new_bp;
 	int			error = 0;
 
-	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
-	if (likely(bp))
+	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
+
+	switch (error) {
+	case 0:
+		/* cache hit */
 		goto found;
+	case -EAGAIN:
+		/* cache hit, trylock failure, caller handles failure */
+		ASSERT(flags & XBF_TRYLOCK);
+		return NULL;
+	case -ENOENT:
+		/* cache miss, go for insert */
+		break;
+	case -EFSCORRUPTED:
+	default:
+		/*
+		 * None of the higher layers understand failure types
+		 * yet, so return NULL to signal a fatal lookup error.
+		 */
+		return NULL;
+	}
 
 	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
 	if (unlikely(!new_bp))
@@ -690,8 +727,8 @@ xfs_buf_get_map(
 		return NULL;
 	}
 
-	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
-	if (!bp) {
+	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
+	if (error) {
 		xfs_buf_free(new_bp);
 		return NULL;
 	}