xfs: refactor dquot cache handling

Delegate the dquot cache handling (radix tree lookup and insertion) to
separate helper functions so that we can continue to simplify the body
of dqget.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
commit cc2047c4d0
parent 2e330e76e0
Author: Darrick J. Wong <darrick.wong@oracle.com>
Date:   2018-05-04 15:30:20 -07:00

1 changed file with 78 additions and 34 deletions
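
For orientation, here is the shape that xfs_qm_dqget()'s cache path (in fs/xfs/xfs_dquot.c) takes once both helpers land. This is a condensed sketch assembled from the hunks below, with the allocation/disk-read path (xfs_qm_dqread) and most error handling elided:

restart:
	/* Cache hit: the helper returns the dquot locked, with a reference. */
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/* Cache miss: read the dquot in from disk (elided), then race to
	 * insert it.  The loser destroys its copy and retries the lookup. */
	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		xfs_qm_dqdestroy(dqp);
		goto restart;
	}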

@@ -686,6 +686,81 @@ xfs_dq_get_next_id(
 	return error;
 }
 
+/*
+ * Look up the dquot in the in-core cache. If found, the dquot is returned
+ * locked and ready to go.
+ */
+static struct xfs_dquot *
+xfs_qm_dqget_cache_lookup(
+	struct xfs_mount	*mp,
+	struct xfs_quotainfo	*qi,
+	struct radix_tree_root	*tree,
+	xfs_dqid_t		id)
+{
+	struct xfs_dquot	*dqp;
+
+restart:
+	mutex_lock(&qi->qi_tree_lock);
+	dqp = radix_tree_lookup(tree, id);
+	if (!dqp) {
+		mutex_unlock(&qi->qi_tree_lock);
+		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
+		return NULL;
+	}
+
+	xfs_dqlock(dqp);
+	if (dqp->dq_flags & XFS_DQ_FREEING) {
+		xfs_dqunlock(dqp);
+		mutex_unlock(&qi->qi_tree_lock);
+		trace_xfs_dqget_freeing(dqp);
+		delay(1);
+		goto restart;
+	}
+
+	dqp->q_nrefs++;
+	mutex_unlock(&qi->qi_tree_lock);
+
+	trace_xfs_dqget_hit(dqp);
+	XFS_STATS_INC(mp, xs_qm_dqcachehits);
+	return dqp;
+}
+
+/*
+ * Try to insert a new dquot into the in-core cache. If an error occurs the
+ * caller should throw away the dquot and start over. Otherwise, the dquot
+ * is returned locked (and held by the cache) as if there had been a cache
+ * hit.
+ */
+static int
+xfs_qm_dqget_cache_insert(
+	struct xfs_mount	*mp,
+	struct xfs_quotainfo	*qi,
+	struct radix_tree_root	*tree,
+	xfs_dqid_t		id,
+	struct xfs_dquot	*dqp)
+{
+	int			error;
+
+	mutex_lock(&qi->qi_tree_lock);
+	error = radix_tree_insert(tree, id, dqp);
+	if (unlikely(error)) {
+		/* Duplicate found! Caller must try again. */
+		WARN_ON(error != -EEXIST);
+		mutex_unlock(&qi->qi_tree_lock);
+		trace_xfs_dqget_dup(dqp);
+		return error;
+	}
+
+	/* Return a locked dquot to the caller, with a reference taken. */
+	xfs_dqlock(dqp);
+	dqp->q_nrefs = 1;
+
+	qi->qi_dquots++;
+	mutex_unlock(&qi->qi_tree_lock);
+
+	return 0;
+}
+
 /*
  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
  * a locked dquot, doing an allocation (if requested) as needed.
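
Aside (not part of the patch): both helpers follow the same locking discipline, condensed below. qi->qi_tree_lock protects the radix tree and nests outside the per-dquot lock, and within these helpers the reference count only changes while both locks are held, which is what lets the lookup side detect an XFS_DQ_FREEING dquot before taking a reference to it.

	/* Sketch only, condensed from xfs_qm_dqget_cache_lookup() above. */
	mutex_lock(&qi->qi_tree_lock);		/* tree lock taken first */
	dqp = radix_tree_lookup(tree, id);
	xfs_dqlock(dqp);			/* per-dquot lock nests inside */
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		/* reclaim owns this dquot: drop both locks and retry */
	}
	dqp->q_nrefs++;				/* reference taken under both locks */
	mutex_unlock(&qi->qi_tree_lock);	/* dquot stays locked for the caller */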
@@ -724,28 +799,11 @@ xfs_qm_dqget(
 	}
 
 restart:
-	mutex_lock(&qi->qi_tree_lock);
-	dqp = radix_tree_lookup(tree, id);
+	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
 	if (dqp) {
-		xfs_dqlock(dqp);
-		if (dqp->dq_flags & XFS_DQ_FREEING) {
-			xfs_dqunlock(dqp);
-			mutex_unlock(&qi->qi_tree_lock);
-			trace_xfs_dqget_freeing(dqp);
-			delay(1);
-			goto restart;
-		}
-
-		dqp->q_nrefs++;
-		mutex_unlock(&qi->qi_tree_lock);
-
-		trace_xfs_dqget_hit(dqp);
-		XFS_STATS_INC(mp, xs_qm_dqcachehits);
 		*O_dqpp = dqp;
 		return 0;
 	}
-	mutex_unlock(&qi->qi_tree_lock);
-	XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 
 	/*
 	 * Dquot cache miss. We don't want to keep the inode lock across
@@ -787,31 +845,17 @@ xfs_qm_dqget(
 		}
 	}
 
-	mutex_lock(&qi->qi_tree_lock);
-	error = radix_tree_insert(tree, id, dqp);
-	if (unlikely(error)) {
-		WARN_ON(error != -EEXIST);
-
+	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
+	if (error) {
 		/*
 		 * Duplicate found. Just throw away the new dquot and start
 		 * over.
 		 */
-		mutex_unlock(&qi->qi_tree_lock);
-		trace_xfs_dqget_dup(dqp);
 		xfs_qm_dqdestroy(dqp);
 		XFS_STATS_INC(mp, xs_qm_dquot_dups);
 		goto restart;
 	}
 
-	/*
-	 * We return a locked dquot to the caller, with a reference taken
-	 */
-	xfs_dqlock(dqp);
-	dqp->q_nrefs = 1;
-
-	qi->qi_dquots++;
-	mutex_unlock(&qi->qi_tree_lock);
-
 dqret:
 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	trace_xfs_dqget_miss(dqp);
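
Nothing changes for callers: xfs_qm_dqget() still hands back a locked dquot with a reference held. As a hedged usage sketch (this call site is illustrative, not taken from this patch), a caller at this point in the series would do roughly:

	struct xfs_dquot	*dqp;
	int			error;

	/* Signature as of this commit; type and flags are illustrative. */
	error = xfs_qm_dqget(mp, ip, id, XFS_DQ_USER, XFS_QMOPT_DQALLOC, &dqp);
	if (error)
		return error;

	/* ... examine or modify the locked dquot ... */

	xfs_qm_dqput(dqp);	/* drops the reference and unlocks */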