Merge tag 'xfs-5.7-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs updates from Darrick Wong:

"There's a lot going on this cycle with cleanups in the log code, the btree code, and the xattr code. We're tightening metadata validation and online fsck checking, and introducing a common btree rebuilding library so that we can refactor xfs_repair and introduce online repair in a future cycle.

We also fixed a few visible bugs -- most notably there's one in getdents that we introduced in 5.6; and a fix for hangs when disabling quotas. This series has been running fstests & other QA in the background for over a week and looks good so far.
I anticipate sending a second pull request next week. That batch will change how xfs interacts with memory reclaim; how the log batches and throttles log items; how hard writes near ENOSPC will try to squeeze more space out of the filesystem; and hopefully fix the last of the umount hangs after a catastrophic failure. That should ease a lot of problems when running at the limits, but for now I'm leaving that in for-next for another week to make sure we got all the subtleties right.

Summary:

- Fix a hard to trigger race between iclog error checking and log shutdown.
- Strengthen the AGF verifier.
- Ratelimit some of the more spammy error messages.
- Remove the icdinode uid/gid members and just use the ones in the vfs inode.
- Hold ILOCK across insert/collapse range.
- Clean up the extended attribute interfaces.
- Clean up the attr flags mess.
- Restore PF_MEMALLOC after exiting the xfsaild thread to avoid triggering warnings in the process accounting code.
- Remove the flexibly-sized array from struct xfs_agfl to eliminate compiler warnings about unaligned pointers and packed structures.
- Various macro and typedef removals.
- Stale metadata buffers if we decide they're corrupt outside of a verifier.
- Check directory data/block/free block owners.
- Fix a UAF when aborting inactivation of a corrupt xattr fork.
- Teach online scrub to report failed directory and attr name lookups as metadata corruption instead of a runtime error.
- Avoid potential buffer overflows in sysfs files by using scnprintf.
- Fix a regression in getdents lookups due to a mistake in pointer arithmetic.
- Refactor btree cursor private data structures to use anonymous unions.
- Cleanups in the log unmounting code.
- Fix a potential mishandling of ENOMEM errors on multi-block directory buffer lookups.
- Fix an incorrect test in the block allocation code.
- Cleanups and name prefix shortening in the scrub code.
- Introduce btree bulk loading code for online repair and scrub.
- Fix a quotaoff log item leak (and hang) when the fs goes down midway through a quotaoff operation.
- Remove di_version from the incore inode.
- Refactor some of the log shutdown checking code.
- Record the forcing of the log unmount records in the log force counters.
- Fix a longstanding bug where quotacheck would purge the administrator's default quota grace interval and warning limits.
- Reduce memory usage when scrubbing directory and xattr trees.
- Don't let fsfreeze race with GETFSMAP or online scrub.
- Handle bio_add_page failures more gracefully in xlog_write_iclog"

* tag 'xfs-5.7-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (108 commits)
  xfs: prohibit fs freezing when using empty transactions
  xfs: shutdown on failure to add page to log bio
  xfs: directory bestfree check should release buffers
  xfs: drop all altpath buffers at the end of the sibling check
  xfs: preserve default grace interval during quotacheck
  xfs: remove xlog_state_want_sync
  xfs: move the ioerror check out of xlog_state_clean_iclog
  xfs: refactor xlog_state_clean_iclog
  xfs: remove the aborted parameter to xlog_state_done_syncing
  xfs: simplify log shutdown checking in xfs_log_release_iclog
  xfs: simplify the xfs_log_release_iclog calling convention
  xfs: factor out a xlog_wait_on_iclog helper
  xfs: merge xlog_cil_push into xlog_cil_push_work
  xfs: remove the di_version field from struct icdinode
  xfs: simplify a check in xfs_ioctl_setattr_check_cowextsize
  xfs: simplify di_flags2 inheritance in xfs_ialloc
  xfs: only check the superblock version for dinode size calculation
  xfs: add a new xfs_sb_version_has_v3inode helper
  xfs: fix unmount hang and memory leak on shutdown during quotaoff
  xfs: factor out quotaoff intent AIL removal and memory free
  ...
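One of the items above -- avoiding buffer overflows in sysfs files by switching to scnprintf -- rests on a subtle difference between the two helpers: snprintf() returns the length the formatted string *would* have had, not the number of bytes actually stored, so code that reuses the return value as an offset for a follow-up write can walk past the end of the buffer. Below is a minimal userspace sketch of the semantics; the scnprintf() here is a stand-in reimplementation of the kernel helper, not the kernel's code.

#include <stdarg.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's scnprintf(): clamp the return value to the
 * number of bytes actually stored, excluding the trailing NUL.
 */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int would_be;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	would_be = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (would_be < 0)
		return 0;
	/* vsnprintf() reports the untruncated length; clamp it. */
	return (size_t)would_be >= size ? (int)(size - 1) : would_be;
}

int main(void)
{
	char buf[8];

	/* snprintf() can claim it "wrote" more bytes than actually fit... */
	int n1 = snprintf(buf, sizeof(buf), "%s", "0123456789");
	/* ...so appending at buf + n1 afterwards would run past the buffer. */
	printf("snprintf returned %d for an %zu-byte buffer\n", n1, sizeof(buf));

	int n2 = scnprintf(buf, sizeof(buf), "%s", "0123456789");
	printf("scnprintf returned %d, safe to use as an offset\n", n2);
	return 0;
}

In a sysfs show method that builds output incrementally, the clamped return value is what keeps each successive write inside the PAGE_SIZE buffer.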
commit 7be97138e7
fs/xfs/Makefile
@@ -26,6 +26,7 @@ xfs-y += $(addprefix libxfs/, \
 				   xfs_bmap.o \
 				   xfs_bmap_btree.o \
 				   xfs_btree.o \
+				   xfs_btree_staging.o \
 				   xfs_da_btree.o \
 				   xfs_defer.o \
 				   xfs_dir2.o \
fs/xfs/libxfs/xfs_ag.c
@@ -231,7 +231,7 @@ xfs_sbblock_init(
 	struct xfs_buf		*bp,
 	struct aghdr_init_data	*id)
 {
-	struct xfs_dsb		*dsb = XFS_BUF_TO_SBP(bp);
+	struct xfs_dsb		*dsb = bp->b_addr;
 
 	xfs_sb_to_disk(dsb, &mp->m_sb);
 	dsb->sb_inprogress = 1;
@@ -243,7 +243,7 @@ xfs_agfblock_init(
 	struct xfs_buf		*bp,
 	struct aghdr_init_data	*id)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
+	struct xfs_agf		*agf = bp->b_addr;
 	xfs_extlen_t		tmpsize;
 
 	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
@@ -301,7 +301,7 @@ xfs_agflblock_init(
 		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
 	}
 
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
+	agfl_bno = xfs_buf_to_agfl_bno(bp);
 	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
 		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
 }
@@ -312,7 +312,7 @@ xfs_agiblock_init(
 	struct xfs_buf		*bp,
 	struct aghdr_init_data	*id)
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(bp);
+	struct xfs_agi		*agi = bp->b_addr;
 	int			bucket;
 
 	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
@@ -502,7 +502,7 @@ xfs_ag_extend_space(
 	if (error)
 		return error;
 
-	agi = XFS_BUF_TO_AGI(bp);
+	agi = bp->b_addr;
 	be32_add_cpu(&agi->agi_length, len);
 	ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
 	       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
@@ -515,7 +515,7 @@ xfs_ag_extend_space(
 	if (error)
 		return error;
 
-	agf = XFS_BUF_TO_AGF(bp);
+	agf = bp->b_addr;
 	be32_add_cpu(&agf->agf_length, len);
 	ASSERT(agf->agf_length == agi->agi_length);
 	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
@@ -569,11 +569,11 @@ xfs_ag_get_geometry(
 	memset(ageo, 0, sizeof(*ageo));
 	ageo->ag_number = agno;
 
-	agi = XFS_BUF_TO_AGI(agi_bp);
+	agi = agi_bp->b_addr;
 	ageo->ag_icount = be32_to_cpu(agi->agi_count);
 	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);
 
-	agf = XFS_BUF_TO_AGF(agf_bp);
+	agf = agf_bp->b_addr;
 	ageo->ag_length = be32_to_cpu(agf->agf_length);
 	freeblks = pag->pagf_freeblks +
 		   pag->pagf_flcount +
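Most of the hunks above follow one mechanical pattern: the old XFS_BUF_TO_AGF()/XFS_BUF_TO_AGI()-style macros hid a cast of bp->b_addr, and the series replaces each use with a typed local initialized directly from bp->b_addr. A standalone sketch of the before/after shape, using mock types rather than the real XFS definitions:

#include <stdio.h>

/* Mock stand-ins for the kernel types; not the real XFS definitions. */
struct xfs_buf { void *b_addr; };
struct xfs_agf { unsigned int agf_length; };

/* Old style: a cast hidden behind a macro at every use site. */
#define XFS_BUF_TO_AGF(bp)	((struct xfs_agf *)(bp)->b_addr)

static unsigned int agf_length_old(struct xfs_buf *bp)
{
	return XFS_BUF_TO_AGF(bp)->agf_length;
}

/* New style: one typed local, so every later use is plain member access. */
static unsigned int agf_length_new(struct xfs_buf *bp)
{
	struct xfs_agf *agf = bp->b_addr;	/* void * converts implicitly in C */

	return agf->agf_length;
}

int main(void)
{
	struct xfs_agf agf = { .agf_length = 42 };
	struct xfs_buf bp = { .b_addr = &agf };

	printf("%u %u\n", agf_length_old(&bp), agf_length_new(&bp));
	return 0;
}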
fs/xfs/libxfs/xfs_alloc.c
@@ -151,7 +151,7 @@ xfs_alloc_lookup_eq(
 	cur->bc_rec.a.ar_startblock = bno;
 	cur->bc_rec.a.ar_blockcount = len;
 	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
-	cur->bc_private.a.priv.abt.active = (*stat == 1);
+	cur->bc_ag.abt.active = (*stat == 1);
 	return error;
 }
 
@@ -171,7 +171,7 @@ xfs_alloc_lookup_ge(
 	cur->bc_rec.a.ar_startblock = bno;
 	cur->bc_rec.a.ar_blockcount = len;
 	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
-	cur->bc_private.a.priv.abt.active = (*stat == 1);
+	cur->bc_ag.abt.active = (*stat == 1);
 	return error;
 }
 
@@ -190,7 +190,7 @@ xfs_alloc_lookup_le(
 	cur->bc_rec.a.ar_startblock = bno;
 	cur->bc_rec.a.ar_blockcount = len;
 	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
-	cur->bc_private.a.priv.abt.active = (*stat == 1);
+	cur->bc_ag.abt.active = (*stat == 1);
 	return error;
 }
 
@@ -198,7 +198,7 @@ static inline bool
 xfs_alloc_cur_active(
 	struct xfs_btree_cur	*cur)
 {
-	return cur && cur->bc_private.a.priv.abt.active;
+	return cur && cur->bc_ag.abt.active;
 }
 
 /*
@@ -230,7 +230,7 @@ xfs_alloc_get_rec(
 	int			*stat)	/* output: success/failure */
 {
 	struct xfs_mount	*mp = cur->bc_mp;
-	xfs_agnumber_t		agno = cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = cur->bc_ag.agno;
 	union xfs_btree_rec	*rec;
 	int			error;
 
@@ -589,6 +589,7 @@ xfs_agfl_verify(
 {
 	struct xfs_mount	*mp = bp->b_mount;
 	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
+	__be32			*agfl_bno = xfs_buf_to_agfl_bno(bp);
 	int			i;
 
 	/*
@@ -614,8 +615,8 @@ xfs_agfl_verify(
 		return __this_address;
 
 	for (i = 0; i < xfs_agfl_size(mp); i++) {
-		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
-		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
+		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
+		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
 			return __this_address;
 	}
 
@@ -713,7 +714,7 @@ xfs_alloc_update_counters(
 	struct xfs_buf		*agbp,
 	long			len)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_agf		*agf = agbp->b_addr;
 
 	pag->pagf_freeblks += len;
 	be32_add_cpu(&agf->agf_freeblks, len);
@@ -721,7 +722,7 @@ xfs_alloc_update_counters(
 	xfs_trans_agblocks_delta(tp, len);
 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
 		     be32_to_cpu(agf->agf_length))) {
-		xfs_buf_corruption_error(agbp);
+		xfs_buf_mark_corrupt(agbp);
 		return -EFSCORRUPTED;
 	}
 
@@ -907,7 +908,7 @@ xfs_alloc_cur_check(
 	deactivate = true;
 out:
 	if (deactivate)
-		cur->bc_private.a.priv.abt.active = false;
+		cur->bc_ag.abt.active = false;
 	trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
 				  *new);
 	return 0;
@@ -922,13 +923,13 @@ xfs_alloc_cur_finish(
 	struct xfs_alloc_arg	*args,
 	struct xfs_alloc_cur	*acur)
 {
+	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
 	int			error;
 
 	ASSERT(acur->cnt && acur->bnolt);
 	ASSERT(acur->bno >= acur->rec_bno);
 	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
-	ASSERT(acur->rec_bno + acur->rec_len <=
-	       be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+	ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
 
 	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
 				      acur->rec_len, acur->bno, acur->len, 0);
@@ -1026,6 +1027,7 @@ xfs_alloc_ag_vextent_small(
 	xfs_extlen_t		*flenp,	/* result length */
 	int			*stat)	/* status: 0-freelist, 1-normal/none */
 {
+	struct xfs_agf		*agf = args->agbp->b_addr;
 	int			error = 0;
 	xfs_agblock_t		fbno = NULLAGBLOCK;
 	xfs_extlen_t		flen = 0;
@@ -1054,8 +1056,7 @@ xfs_alloc_ag_vextent_small(
 
 	if (args->minlen != 1 || args->alignment != 1 ||
 	    args->resv == XFS_AG_RESV_AGFL ||
-	    (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) <=
-	     args->minleft))
+	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
 		goto out;
 
 	error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
@@ -1079,9 +1080,7 @@ xfs_alloc_ag_vextent_small(
 	}
 	*fbnop = args->agbno = fbno;
 	*flenp = args->len = 1;
-	if (XFS_IS_CORRUPT(args->mp,
-			   fbno >= be32_to_cpu(
-				   XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
 		error = -EFSCORRUPTED;
 		goto error;
 	}
@@ -1203,6 +1202,7 @@ STATIC int			/* error */
 xfs_alloc_ag_vextent_exact(
 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
 {
+	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
 	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
 	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
 	int		error;
@@ -1281,8 +1281,7 @@ xfs_alloc_ag_vextent_exact(
 	 */
 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
 		args->agno, XFS_BTNUM_CNT);
-	ASSERT(args->agbno + args->len <=
-	       be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+	ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
 				      args->len, XFSA_FIXUP_BNO_OK);
 	if (error) {
@@ -1353,7 +1352,7 @@ xfs_alloc_walk_iter(
 	if (error)
 		return error;
 	if (i == 0)
-		cur->bc_private.a.priv.abt.active = false;
+		cur->bc_ag.abt.active = false;
 
 	if (count > 0)
 		count--;
@@ -1468,7 +1467,7 @@ xfs_alloc_ag_vextent_locality(
 	if (error)
 		return error;
 	if (i) {
-		acur->cnt->bc_private.a.priv.abt.active = true;
+		acur->cnt->bc_ag.abt.active = true;
 		fbcur = acur->cnt;
 		fbinc = false;
 	}
@@ -1515,7 +1514,7 @@ xfs_alloc_ag_vextent_lastblock(
 	 * maxlen, go to the start of this block, and skip all those smaller
 	 * than minlen.
 	 */
-	if (len || args->alignment > 1) {
+	if (*len || args->alignment > 1) {
 		acur->cnt->bc_ptrs[0] = 1;
 		do {
 			error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
@@ -1661,6 +1660,7 @@ STATIC int			/* error */
 xfs_alloc_ag_vextent_size(
 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
 {
+	struct xfs_agf	*agf = args->agbp->b_addr;
 	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
 	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
 	int		error;		/* error result */
@@ -1851,8 +1851,7 @@ xfs_alloc_ag_vextent_size(
 	args->agbno = rbno;
 	if (XFS_IS_CORRUPT(args->mp,
 			   args->agbno + args->len >
-			   be32_to_cpu(
-				   XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+			   be32_to_cpu(agf->agf_length))) {
 		error = -EFSCORRUPTED;
 		goto error0;
 	}
@@ -2424,7 +2423,7 @@ xfs_agfl_reset(
 	struct xfs_perag	*pag)
 {
 	struct xfs_mount	*mp = tp->t_mountp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_agf		*agf = agbp->b_addr;
 
 	ASSERT(pag->pagf_agflreset);
 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
@@ -2655,7 +2654,7 @@ xfs_alloc_get_freelist(
 	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
 	int		btreeblk) /* destination is a AGF btree */
 {
-	xfs_agf_t	*agf;	/* a.g. freespace structure */
+	struct xfs_agf	*agf = agbp->b_addr;
 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
 	xfs_agblock_t	bno;	/* block number returned */
 	__be32		*agfl_bno;
@@ -2667,7 +2666,6 @@ xfs_alloc_get_freelist(
 	/*
 	 * Freelist is empty, give up.
 	 */
-	agf = XFS_BUF_TO_AGF(agbp);
 	if (!agf->agf_flcount) {
 		*bnop = NULLAGBLOCK;
 		return 0;
@@ -2684,7 +2682,7 @@ xfs_alloc_get_freelist(
 	/*
 	 * Get the block number and update the data structures.
 	 */
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
 	be32_add_cpu(&agf->agf_flfirst, 1);
 	xfs_trans_brelse(tp, agflbp);
@@ -2745,7 +2743,7 @@ xfs_alloc_log_agf(
 		sizeof(xfs_agf_t)
 	};
 
-	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
+	trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
 
 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
 
@@ -2783,18 +2781,15 @@ xfs_alloc_put_freelist(
 	xfs_agblock_t		bno,	/* block being freed */
 	int			btreeblk) /* block came from a AGF btree */
 {
-	xfs_agf_t		*agf;	/* a.g. freespace structure */
+	struct xfs_mount	*mp = tp->t_mountp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	__be32			*blockp;/* pointer to array entry */
 	int			error;
 	int			logflags;
-	xfs_mount_t		*mp;	/* mount structure */
 	xfs_perag_t		*pag;	/* per allocation group data */
 	__be32			*agfl_bno;
 	int			startoff;
 
-	agf = XFS_BUF_TO_AGF(agbp);
-	mp = tp->t_mountp;
-
 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
 			be32_to_cpu(agf->agf_seqno), &agflbp)))
 		return error;
@@ -2820,7 +2815,7 @@ xfs_alloc_put_freelist(
 
 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
 
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
 	*blockp = cpu_to_be32(bno);
 	startoff = (char *)blockp - (char *)agflbp->b_addr;
@@ -2838,13 +2833,12 @@ xfs_agf_verify(
 	struct xfs_buf		*bp)
 {
 	struct xfs_mount	*mp = bp->b_mount;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
+	struct xfs_agf		*agf = bp->b_addr;
 
 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
 			return __this_address;
-		if (!xfs_log_check_lsn(mp,
-				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
+		if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
 			return __this_address;
 	}
 
@@ -2858,6 +2852,13 @@ xfs_agf_verify(
 	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
 		return __this_address;
 
+	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
+		return __this_address;
+
+	if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
+	    be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
+		return __this_address;
+
 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
@@ -2869,6 +2870,10 @@ xfs_agf_verify(
 	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
 		return __this_address;
 
+	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
+	    be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
+		return __this_address;
+
 	/*
 	 * during growfs operations, the perag is not fully initialised,
 	 * so we can't use it for any useful checking. growfs ensures we can't
@@ -2882,6 +2887,11 @@ xfs_agf_verify(
 	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
 		return __this_address;
 
+	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
+	    be32_to_cpu(agf->agf_refcount_blocks) >
+	    be32_to_cpu(agf->agf_length))
+		return __this_address;
+
 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
 	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
 	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
@@ -2914,6 +2924,7 @@ xfs_agf_write_verify(
 {
 	struct xfs_mount	*mp = bp->b_mount;
 	struct xfs_buf_log_item	*bip = bp->b_log_item;
+	struct xfs_agf		*agf = bp->b_addr;
 	xfs_failaddr_t		fa;
 
 	fa = xfs_agf_verify(bp);
@@ -2926,7 +2937,7 @@ xfs_agf_write_verify(
 		return;
 
 	if (bip)
-		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+		agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
 
 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
 }
@@ -2994,7 +3005,7 @@ xfs_alloc_read_agf(
 		return error;
 	ASSERT(!(*bpp)->b_error);
 
-	agf = XFS_BUF_TO_AGF(*bpp);
+	agf = (*bpp)->b_addr;
 	pag = xfs_perag_get(mp, agno);
 	if (!pag->pagf_init) {
 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
@@ -3275,6 +3286,7 @@ __xfs_free_extent(
 	struct xfs_buf		*agbp;
 	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, bno);
 	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(mp, bno);
+	struct xfs_agf		*agf;
 	int			error;
 	unsigned int		busy_flags = 0;
 
@@ -3288,6 +3300,7 @@ __xfs_free_extent(
 	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
 	if (error)
 		return error;
+	agf = agbp->b_addr;
 
 	if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
 		error = -EFSCORRUPTED;
@@ -3295,9 +3308,7 @@ __xfs_free_extent(
 	}
 
 	/* validate the extent size is legal now we have the agf locked */
-	if (XFS_IS_CORRUPT(mp,
-			   agbno + len >
-			   be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length))) {
+	if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
 		error = -EFSCORRUPTED;
 		goto err;
 	}
@@ -3408,7 +3419,7 @@ xfs_agfl_walk(
 	unsigned int		i;
 	int			error;
 
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
 	i = be32_to_cpu(agf->agf_flfirst);
 
 	/* Nothing to walk in an empty AGFL. */
fs/xfs/libxfs/xfs_alloc.h
@@ -236,4 +236,13 @@ typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
 int xfs_agfl_walk(struct xfs_mount *mp, struct xfs_agf *agf,
 		struct xfs_buf *agflbp, xfs_agfl_walk_fn walk_fn, void *priv);
 
+static inline __be32 *
+xfs_buf_to_agfl_bno(
+	struct xfs_buf		*bp)
+{
+	if (xfs_sb_version_hascrc(&bp->b_mount->m_sb))
+		return bp->b_addr + sizeof(struct xfs_agfl);
+	return bp->b_addr;
+}
+
 #endif	/* __XFS_ALLOC_H__ */
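The new xfs_buf_to_agfl_bno() helper above replaces the flexible agfl_bno[] array member that used to sit at the end of the packed struct xfs_agfl, which is what produced the unaligned-pointer/packed-struct compiler warnings mentioned in the summary. The idea is just a conditional offset into the buffer: v5 (CRC-enabled) filesystems put a header before the free-list array, while older formats use the whole block for the array. A standalone illustration of the same shape, with mock types rather than the real on-disk XFS layout:

#include <stdint.h>
#include <stdio.h>

/* Mock types standing in for the kernel's; layout simplified. */
struct mock_agfl_hdr { uint32_t magic; uint32_t seqno; };	/* v5-style header */
struct mock_buf { void *b_addr; int has_crc_header; };

/*
 * Where the free-list array lives depends on the on-disk format:
 * v5 blocks carry a header first, older blocks are all array.
 */
static uint32_t *buf_to_agfl_bno(struct mock_buf *bp)
{
	if (bp->has_crc_header)
		return (uint32_t *)((char *)bp->b_addr +
				    sizeof(struct mock_agfl_hdr));
	return bp->b_addr;
}

int main(void)
{
	unsigned char block[64] = { 0 };
	struct mock_buf v4 = { .b_addr = block, .has_crc_header = 0 };
	struct mock_buf v5 = { .b_addr = block, .has_crc_header = 1 };

	/* Same block, two possible array starts. */
	printf("v4 array offset: %td\n",
	       (char *)buf_to_agfl_bno(&v4) - (char *)block);
	printf("v5 array offset: %td\n",
	       (char *)buf_to_agfl_bno(&v5) - (char *)block);
	return 0;
}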
fs/xfs/libxfs/xfs_alloc_btree.c
@@ -12,6 +12,7 @@
 #include "xfs_sb.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
@@ -25,7 +26,7 @@ xfs_allocbt_dup_cursor(
 	struct xfs_btree_cur	*cur)
 {
 	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agbp, cur->bc_private.a.agno,
+			cur->bc_ag.agbp, cur->bc_ag.agno,
 			cur->bc_btnum);
 }
 
@@ -35,8 +36,8 @@ xfs_allocbt_set_root(
 	union xfs_btree_ptr	*ptr,
 	int			inc)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
 	int			btnum = cur->bc_btnum;
 	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);
@@ -62,7 +63,7 @@ xfs_allocbt_alloc_block(
 	xfs_agblock_t		bno;
 
 	/* Allocate the new block from the freelist. If we can't, give up. */
-	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
 				       &bno, 1);
 	if (error)
 		return error;
@@ -72,7 +73,7 @@ xfs_allocbt_alloc_block(
 		return 0;
 	}
 
-	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
+	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
 
 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
 	new->s = cpu_to_be32(bno);
@@ -86,8 +87,8 @@ xfs_allocbt_free_block(
 	struct xfs_btree_cur	*cur,
 	struct xfs_buf		*bp)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_agblock_t		bno;
 	int			error;
 
@@ -113,7 +114,7 @@ xfs_allocbt_update_lastrec(
 	int			ptr,
 	int			reason)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
 	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
 	struct xfs_perag	*pag;
 	__be32			len;
@@ -162,7 +163,7 @@ xfs_allocbt_update_lastrec(
 	pag = xfs_perag_get(cur->bc_mp, seqno);
 	pag->pagf_longest = be32_to_cpu(len);
 	xfs_perag_put(pag);
-	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
+	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
 }
 
 STATIC int
@@ -226,9 +227,9 @@ xfs_allocbt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
 
-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
 
 	ptr->s = agf->agf_roots[cur->bc_btnum];
 }
@@ -471,18 +472,14 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
 	.recs_inorder		= xfs_cntbt_recs_inorder,
 };
 
-/*
- * Allocate a new allocation btree cursor.
- */
-struct xfs_btree_cur *			/* new alloc btree cursor */
-xfs_allocbt_init_cursor(
-	struct xfs_mount	*mp,		/* file system mount point */
-	struct xfs_trans	*tp,		/* transaction pointer */
-	struct xfs_buf		*agbp,		/* buffer for agf structure */
-	xfs_agnumber_t		agno,		/* allocation group number */
-	xfs_btnum_t		btnum)		/* btree identifier */
+/* Allocate most of a new allocation btree cursor. */
+STATIC struct xfs_btree_cur *
+xfs_allocbt_init_common(
	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
 	struct xfs_btree_cur	*cur;
 
 	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
@@ -495,19 +492,16 @@ xfs_allocbt_init_cursor(
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
 
 	if (btnum == XFS_BTNUM_CNT) {
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
 		cur->bc_ops = &xfs_cntbt_ops;
-		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
 		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
 	} else {
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
 		cur->bc_ops = &xfs_bnobt_ops;
-		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
 	}
 
-	cur->bc_private.a.agbp = agbp;
-	cur->bc_private.a.agno = agno;
-	cur->bc_private.a.priv.abt.active = false;
+	cur->bc_ag.agno = agno;
+	cur->bc_ag.abt.active = false;
 
 	if (xfs_sb_version_hascrc(&mp->m_sb))
 		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
@@ -515,6 +509,73 @@ xfs_allocbt_init_cursor(
 	return cur;
 }
 
+/*
+ * Allocate a new allocation btree cursor.
+ */
+struct xfs_btree_cur *			/* new alloc btree cursor */
+xfs_allocbt_init_cursor(
+	struct xfs_mount	*mp,		/* file system mount point */
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_buf		*agbp,		/* buffer for agf structure */
+	xfs_agnumber_t		agno,		/* allocation group number */
+	xfs_btnum_t		btnum)		/* btree identifier */
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
+	if (btnum == XFS_BTNUM_CNT)
+		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
+	else
+		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
+
+	cur->bc_ag.agbp = agbp;
+
+	return cur;
+}
+
+/* Create a free space btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_allocbt_stage_cursor(
+	struct xfs_mount	*mp,
+	struct xbtree_afakeroot	*afake,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Install a new free space btree root.  Caller is responsible for invalidating
+ * and freeing the old btree blocks.
+ */
+void
+xfs_allocbt_commit_staged_btree(
+	struct xfs_btree_cur	*cur,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp)
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
+	} else {
+		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
+	}
+}
+
 /*
  * Calculate number of records in an alloc btree block.
  */
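The staged-cursor functions above are the allocation-btree hookup for the new btree bulk loading code: a repair builds the replacement tree off to the side under a "fake root", and only when it is complete does a single commit splice the new root block and level count into the AGF. A toy, single-threaded sketch of that build-then-commit shape, with invented types rather than the kernel API:

#include <stdio.h>

/* Toy stand-ins: a "filesystem" whose root pointer readers follow. */
struct fake_root { int root_block; int levels; };
struct fs { int root_block; int levels; };

/* Stage: build the new structure entirely off to the side. */
static void stage_build(struct fake_root *afake)
{
	afake->root_block = 123;	/* pretend we wrote new btree blocks */
	afake->levels = 2;
}

/* Commit: a single root/level update makes the new tree live. */
static void commit_staged(struct fs *fs, const struct fake_root *afake)
{
	fs->root_block = afake->root_block;
	fs->levels = afake->levels;
	/* the old blocks would now be invalidated and freed */
}

int main(void)
{
	struct fs fs = { .root_block = 7, .levels = 1 };
	struct fake_root afake = { 0 };

	stage_build(&afake);		/* readers still see the old tree */
	commit_staged(&fs, &afake);	/* atomic switch from old to new */
	printf("root=%d levels=%d\n", fs.root_block, fs.levels);
	return 0;
}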
fs/xfs/libxfs/xfs_alloc_btree.h
@@ -13,6 +13,7 @@
 struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
+struct xbtree_afakeroot;
 
 /*
  * Btree block header size depends on a superblock flag.
@@ -48,8 +49,14 @@ struct xfs_mount;
 extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
 		struct xfs_trans *, struct xfs_buf *,
 		xfs_agnumber_t, xfs_btnum_t);
+struct xfs_btree_cur *xfs_allocbt_stage_cursor(struct xfs_mount *mp,
+		struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+		xfs_btnum_t btnum);
 extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
 extern xfs_extlen_t xfs_allocbt_calc_size(struct xfs_mount *mp,
 		unsigned long long len);
 
+void xfs_allocbt_commit_staged_btree(struct xfs_btree_cur *cur,
+		struct xfs_trans *tp, struct xfs_buf *agbp);
+
 #endif	/* __XFS_ALLOC_BTREE_H__ */
fs/xfs/libxfs/xfs_attr.c
@@ -56,33 +56,6 @@ STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
 STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
 STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
 
-
-STATIC int
-xfs_attr_args_init(
-	struct xfs_da_args	*args,
-	struct xfs_inode	*dp,
-	const unsigned char	*name,
-	size_t			namelen,
-	int			flags)
-{
-
-	if (!name)
-		return -EINVAL;
-
-	memset(args, 0, sizeof(*args));
-	args->geo = dp->i_mount->m_attr_geo;
-	args->whichfork = XFS_ATTR_FORK;
-	args->dp = dp;
-	args->flags = flags;
-	args->name = name;
-	args->namelen = namelen;
-	if (args->namelen >= MAXNAMELEN)
-		return -EFAULT;		/* match IRIX behaviour */
-
-	args->hashval = xfs_da_hashname(args->name, args->namelen);
-	return 0;
-}
-
 int
 xfs_inode_hasattr(
 	struct xfs_inode	*ip)
@@ -104,85 +77,60 @@ xfs_inode_hasattr(
  */
 int
 xfs_attr_get_ilocked(
-	struct xfs_inode	*ip,
 	struct xfs_da_args	*args)
 {
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT(xfs_isilocked(args->dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 
-	if (!xfs_inode_hasattr(ip))
+	if (!xfs_inode_hasattr(args->dp))
 		return -ENOATTR;
-	else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
+
+	if (args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
 		return xfs_attr_shortform_getvalue(args);
-	else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
+	if (xfs_bmap_one_block(args->dp, XFS_ATTR_FORK))
 		return xfs_attr_leaf_get(args);
-	else
-		return xfs_attr_node_get(args);
+	return xfs_attr_node_get(args);
 }
 
 /*
  * Retrieve an extended attribute by name, and its value if requested.
  *
- * If ATTR_KERNOVAL is set in @flags, then the caller does not want the value,
- * just an indication whether the attribute exists and the size of the value if
- * it exists. The size is returned in @valuelenp,
+ * If args->valuelen is zero, then the caller does not want the value, just an
+ * indication whether the attribute exists and the size of the value if it
+ * exists. The size is returned in args.valuelen.
+ *
+ * If args->value is NULL but args->valuelen is non-zero, allocate the buffer
+ * for the value after existence of the attribute has been determined. The
+ * caller always has to free args->value if it is set, no matter if this
+ * function was successful or not.
  *
 * If the attribute is found, but exceeds the size limit set by the caller in
- * @valuelenp, return -ERANGE with the size of the attribute that was found in
- * @valuelenp.
- *
- * If ATTR_ALLOC is set in @flags, allocate the buffer for the value after
- * existence of the attribute has been determined. On success, return that
- * buffer to the caller and leave them to free it. On failure, free any
- * allocated buffer and ensure the buffer pointer returned to the caller is
- * null.
+ * args->valuelen, return -ERANGE with the size of the attribute that was found
+ * in args->valuelen.
 */
 int
 xfs_attr_get(
-	struct xfs_inode	*ip,
-	const unsigned char	*name,
-	size_t			namelen,
-	unsigned char		**value,
-	int			*valuelenp,
-	int			flags)
+	struct xfs_da_args	*args)
 {
-	struct xfs_da_args	args;
 	uint			lock_mode;
 	int			error;
 
-	ASSERT((flags & (ATTR_ALLOC | ATTR_KERNOVAL)) || *value);
+	XFS_STATS_INC(args->dp->i_mount, xs_attr_get);
 
-	XFS_STATS_INC(ip->i_mount, xs_attr_get);
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+	if (XFS_FORCED_SHUTDOWN(args->dp->i_mount))
 		return -EIO;
 
-	error = xfs_attr_args_init(&args, ip, name, namelen, flags);
-	if (error)
-		return error;
+	args->geo = args->dp->i_mount->m_attr_geo;
+	args->whichfork = XFS_ATTR_FORK;
+	args->hashval = xfs_da_hashname(args->name, args->namelen);
 
 	/* Entirely possible to look up a name which doesn't exist */
-	args.op_flags = XFS_DA_OP_OKNOENT;
-	if (flags & ATTR_ALLOC)
-		args.op_flags |= XFS_DA_OP_ALLOCVAL;
-	else
-		args.value = *value;
-	args.valuelen = *valuelenp;
+	args->op_flags = XFS_DA_OP_OKNOENT;
 
-	lock_mode = xfs_ilock_attr_map_shared(ip);
-	error = xfs_attr_get_ilocked(ip, &args);
-	xfs_iunlock(ip, lock_mode);
-	*valuelenp = args.valuelen;
+	lock_mode = xfs_ilock_attr_map_shared(args->dp);
+	error = xfs_attr_get_ilocked(args);
+	xfs_iunlock(args->dp, lock_mode);
 
-	/* on error, we have to clean up allocated value buffers */
-	if (error) {
-		if (flags & ATTR_ALLOC) {
-			kmem_free(args.value);
-			*value = NULL;
-		}
-		return error;
-	}
-	*value = args.value;
-	return 0;
+	return error;
 }
 
 /*
@@ -238,7 +186,7 @@ xfs_attr_try_sf_addname(
 	 * Commit the shortform mods, and we're done.
 	 * NOTE: this is also the error path (EEXIST, etc).
 	 */
-	if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+	if (!error && !(args->op_flags & XFS_DA_OP_NOTIME))
 		xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
 
 	if (mp->m_flags & XFS_MOUNT_WSYNC)
@@ -336,80 +284,102 @@ xfs_attr_remove_args(
 	return error;
 }
 
+/*
+ * Note: If args->value is NULL the attribute will be removed, just like the
+ * Linux ->setattr API.
+ */
 int
 xfs_attr_set(
-	struct xfs_inode	*dp,
-	const unsigned char	*name,
-	size_t			namelen,
-	unsigned char		*value,
-	int			valuelen,
-	int			flags)
+	struct xfs_da_args	*args)
 {
+	struct xfs_inode	*dp = args->dp;
 	struct xfs_mount	*mp = dp->i_mount;
-	struct xfs_da_args	args;
 	struct xfs_trans_res	tres;
-	int			rsvd = (flags & ATTR_ROOT) != 0;
+	bool			rsvd = (args->attr_filter & XFS_ATTR_ROOT);
 	int			error, local;
-
-	XFS_STATS_INC(mp, xs_attr_set);
+	unsigned int		total;
 
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
 
-	error = xfs_attr_args_init(&args, dp, name, namelen, flags);
-	if (error)
-		return error;
-
-	args.value = value;
-	args.valuelen = valuelen;
-	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
-	args.total = xfs_attr_calc_size(&args, &local);
-
 	error = xfs_qm_dqattach(dp);
 	if (error)
 		return error;
 
+	args->geo = mp->m_attr_geo;
+	args->whichfork = XFS_ATTR_FORK;
+	args->hashval = xfs_da_hashname(args->name, args->namelen);
+
 	/*
-	 * If the inode doesn't have an attribute fork, add one.
-	 * (inode must not be locked when we call this routine)
+	 * We have no control over the attribute names that userspace passes us
+	 * to remove, so we have to allow the name lookup prior to attribute
+	 * removal to fail as well.
 	 */
-	if (XFS_IFORK_Q(dp) == 0) {
-		int sf_size = sizeof(xfs_attr_sf_hdr_t) +
-			XFS_ATTR_SF_ENTSIZE_BYNAME(args.namelen, valuelen);
+	args->op_flags = XFS_DA_OP_OKNOENT;
 
-		error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
-		if (error)
-			return error;
+	if (args->value) {
+		XFS_STATS_INC(mp, xs_attr_set);
+
+		args->op_flags |= XFS_DA_OP_ADDNAME;
+		args->total = xfs_attr_calc_size(args, &local);
+
+		/*
+		 * If the inode doesn't have an attribute fork, add one.
+		 * (inode must not be locked when we call this routine)
+		 */
+		if (XFS_IFORK_Q(dp) == 0) {
+			int sf_size = sizeof(struct xfs_attr_sf_hdr) +
+				XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen,
+						args->valuelen);
+
+			error = xfs_bmap_add_attrfork(dp, sf_size, rsvd);
+			if (error)
+				return error;
+		}
+
+		tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+				 M_RES(mp)->tr_attrsetrt.tr_logres *
+					args->total;
+		tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+		tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+		total = args->total;
+	} else {
+		XFS_STATS_INC(mp, xs_attr_remove);
+
+		tres = M_RES(mp)->tr_attrrm;
+		total = XFS_ATTRRM_SPACE_RES(mp);
 	}
 
-	tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
-			 M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
-	tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
-	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
-
 	/*
 	 * Root fork attributes can use reserved data blocks for this
 	 * operation if necessary
 	 */
-	error = xfs_trans_alloc(mp, &tres, args.total, 0,
-			rsvd ? XFS_TRANS_RESERVE : 0, &args.trans);
+	error = xfs_trans_alloc(mp, &tres, total, 0,
+			rsvd ? XFS_TRANS_RESERVE : 0, &args->trans);
 	if (error)
 		return error;
 
 	xfs_ilock(dp, XFS_ILOCK_EXCL);
-	error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
-			rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
-			       XFS_QMOPT_RES_REGBLKS);
-	if (error)
-		goto out_trans_cancel;
+	xfs_trans_ijoin(args->trans, dp, 0);
+	if (args->value) {
+		unsigned int	quota_flags = XFS_QMOPT_RES_REGBLKS;
 
-	xfs_trans_ijoin(args.trans, dp, 0);
-	error = xfs_attr_set_args(&args);
-	if (error)
-		goto out_trans_cancel;
-	if (!args.trans) {
-		/* shortform attribute has already been committed */
-		goto out_unlock;
+		if (rsvd)
+			quota_flags |= XFS_QMOPT_FORCE_RES;
+		error = xfs_trans_reserve_quota_nblks(args->trans, dp,
+				args->total, 0, quota_flags);
+		if (error)
+			goto out_trans_cancel;
+		error = xfs_attr_set_args(args);
+		if (error)
+			goto out_trans_cancel;
+		/* shortform attribute has already been committed */
+		if (!args->trans)
+			goto out_unlock;
+	} else {
+		error = xfs_attr_remove_args(args);
+		if (error)
+			goto out_trans_cancel;
 	}
 
 	/*
@@ -417,109 +387,26 @@ xfs_attr_set(
 	 * transaction goes to disk before returning to the user.
 	 */
 	if (mp->m_flags & XFS_MOUNT_WSYNC)
-		xfs_trans_set_sync(args.trans);
+		xfs_trans_set_sync(args->trans);
 
-	if ((flags & ATTR_KERNOTIME) == 0)
-		xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
+	if (!(args->op_flags & XFS_DA_OP_NOTIME))
+		xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
 
 	/*
 	 * Commit the last in the sequence of transactions.
 	 */
-	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
-	error = xfs_trans_commit(args.trans);
+	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
+	error = xfs_trans_commit(args->trans);
 out_unlock:
 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 	return error;
 
 out_trans_cancel:
-	if (args.trans)
-		xfs_trans_cancel(args.trans);
+	if (args->trans)
		xfs_trans_cancel(args->trans);
 	goto out_unlock;
 }
 
-/*
- * Generic handler routine to remove a name from an attribute list.
- * Transitions attribute list from Btree to shortform as necessary.
- */
-int
-xfs_attr_remove(
-	struct xfs_inode	*dp,
-	const unsigned char	*name,
-	size_t			namelen,
-	int			flags)
-{
-	struct xfs_mount	*mp = dp->i_mount;
-	struct xfs_da_args	args;
-	int			error;
-
-	XFS_STATS_INC(mp, xs_attr_remove);
-
-	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
-		return -EIO;
-
-	error = xfs_attr_args_init(&args, dp, name, namelen, flags);
-	if (error)
-		return error;
-
-	/*
-	 * we have no control over the attribute names that userspace passes us
-	 * to remove, so we have to allow the name lookup prior to attribute
-	 * removal to fail.
-	 */
-	args.op_flags = XFS_DA_OP_OKNOENT;
-
-	error = xfs_qm_dqattach(dp);
-	if (error)
-		return error;
-
-	/*
-	 * Root fork attributes can use reserved data blocks for this
-	 * operation if necessary
-	 */
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrrm,
-			XFS_ATTRRM_SPACE_RES(mp), 0,
-			(flags & ATTR_ROOT) ? XFS_TRANS_RESERVE : 0,
-			&args.trans);
-	if (error)
-		return error;
-
-	xfs_ilock(dp, XFS_ILOCK_EXCL);
-	/*
-	 * No need to make quota reservations here. We expect to release some
-	 * blocks not allocate in the common case.
-	 */
-	xfs_trans_ijoin(args.trans, dp, 0);
-
-	error = xfs_attr_remove_args(&args);
-	if (error)
-		goto out;
-
-	/*
-	 * If this is a synchronous mount, make sure that the
-	 * transaction goes to disk before returning to the user.
-	 */
-	if (mp->m_flags & XFS_MOUNT_WSYNC)
-		xfs_trans_set_sync(args.trans);
-
-	if ((flags & ATTR_KERNOTIME) == 0)
-		xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
-
-	/*
-	 * Commit the last in the sequence of transactions.
-	 */
-	xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
-	error = xfs_trans_commit(args.trans);
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-	return error;
-
-out:
-	if (args.trans)
-		xfs_trans_cancel(args.trans);
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
-	return error;
-}
-
 /*========================================================================
  * External routines when attribute list is inside the inode
  *========================================================================*/
@@ -536,10 +423,10 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
 	trace_xfs_attr_sf_addname(args);
 
 	retval = xfs_attr_shortform_lookup(args);
-	if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
+	if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
 		return retval;
-	} else if (retval == -EEXIST) {
-		if (args->flags & ATTR_CREATE)
+	if (retval == -EEXIST) {
+		if (args->attr_flags & XATTR_CREATE)
 			return retval;
 		retval = xfs_attr_shortform_remove(args);
 		if (retval)
@@ -549,7 +436,7 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
 		 * that the leaf format add routine won't trip over the attr
 		 * not being around.
 		 */
-		args->flags &= ~ATTR_REPLACE;
+		args->attr_flags &= ~XATTR_REPLACE;
 	}
 
 	if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
@@ -602,14 +489,11 @@ xfs_attr_leaf_addname(
	 * the given flags produce an error or call for an atomic rename.
	 */
 	retval = xfs_attr3_leaf_lookup_int(bp, args);
-	if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
-		xfs_trans_brelse(args->trans, bp);
-		return retval;
-	} else if (retval == -EEXIST) {
-		if (args->flags & ATTR_CREATE) {	/* pure create op */
-			xfs_trans_brelse(args->trans, bp);
-			return retval;
-		}
+	if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
+		goto out_brelse;
+	if (retval == -EEXIST) {
+		if (args->attr_flags & XATTR_CREATE)
+			goto out_brelse;
 
 		trace_xfs_attr_leaf_replace(args);
 
@@ -750,6 +634,9 @@ xfs_attr_leaf_addname(
 		error = xfs_attr3_leaf_clearflag(args);
 	}
 	return error;
+out_brelse:
+	xfs_trans_brelse(args->trans, bp);
+	return retval;
 }
 
 /*
@@ -876,10 +763,10 @@ xfs_attr_node_addname(
 		goto out;
 	blk = &state->path.blk[ state->path.active-1 ];
 	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
-	if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) {
+	if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
 		goto out;
-	} else if (retval == -EEXIST) {
-		if (args->flags & ATTR_CREATE)
+	if (retval == -EEXIST) {
+		if (args->attr_flags & XATTR_CREATE)
 			goto out;
 
 		trace_xfs_attr_node_replace(args);
@@ -1011,7 +898,7 @@ xfs_attr_node_addname(
 	 * The INCOMPLETE flag means that we will find the "old"
 	 * attr, not the "new" one.
 	 */
-	args->op_flags |= XFS_DA_OP_INCOMPLETE;
+	args->attr_filter |= XFS_ATTR_INCOMPLETE;
 	state = xfs_da_state_alloc();
 	state->args = args;
 	state->mp = mp;
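After this refactor there is a single xfs_attr_set() entry point for both setting and removing attributes, with a NULL args->value selecting the removal path -- the same convention the Linux setxattr API uses. A toy sketch of that dispatch shape over an invented one-slot store, not the kernel code:

#include <stdio.h>
#include <string.h>

struct da_args { const char *name; const char *value; };

/* Toy backing store: one slot, just to make the dispatch observable. */
static char stored_name[32], stored_value[32];

static int attr_set_args(const struct da_args *args)
{
	snprintf(stored_name, sizeof(stored_name), "%s", args->name);
	snprintf(stored_value, sizeof(stored_value), "%s", args->value);
	return 0;
}

static int attr_remove_args(const struct da_args *args)
{
	if (strcmp(stored_name, args->name) != 0)
		return -1;	/* stand-in for -ENOATTR */
	stored_name[0] = stored_value[0] = '\0';
	return 0;
}

/* One entry point; a NULL value means "remove", as in setxattr. */
static int attr_set(const struct da_args *args)
{
	if (args->value)
		return attr_set_args(args);
	return attr_remove_args(args);
}

int main(void)
{
	struct da_args set = { .name = "user.demo", .value = "hello" };
	struct da_args del = { .name = "user.demo", .value = NULL };

	attr_set(&set);
	printf("set: %s=%s\n", stored_name, stored_value);
	attr_set(&del);
	printf("removed: %s\n", stored_name[0] ? "no" : "yes");
	return 0;
}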
@ -21,39 +21,6 @@ struct xfs_attr_list_context;
|
|||
* as possible so as to fit into the literal area of the inode.
|
||||
*/
|
||||
|
||||
/*========================================================================
|
||||
* External interfaces
|
||||
*========================================================================*/
|
||||
|
||||
|
||||
#define ATTR_DONTFOLLOW 0x0001 /* -- ignored, from IRIX -- */
|
||||
#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
|
||||
#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
|
||||
#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
|
||||
#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
|
||||
#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
|
||||
|
||||
#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
|
||||
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
|
||||
|
||||
#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
|
||||
#define ATTR_ALLOC 0x8000 /* [kernel] allocate xattr buffer on demand */
|
||||
|
||||
#define ATTR_KERNEL_FLAGS \
|
||||
(ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
|
||||
|
||||
#define XFS_ATTR_FLAGS \
|
||||
{ ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
|
||||
{ ATTR_ROOT, "ROOT" }, \
|
||||
{ ATTR_TRUST, "TRUST" }, \
|
||||
{ ATTR_SECURE, "SECURE" }, \
|
||||
{ ATTR_CREATE, "CREATE" }, \
|
||||
{ ATTR_REPLACE, "REPLACE" }, \
|
||||
{ ATTR_KERNOTIME, "KERNOTIME" }, \
|
||||
{ ATTR_KERNOVAL, "KERNOVAL" }, \
|
||||
{ ATTR_INCOMPLETE, "INCOMPLETE" }, \
|
||||
{ ATTR_ALLOC, "ALLOC" }
|
||||
|
||||
/*
|
||||
* The maximum size (into the kernel or returned from the kernel) of an
|
||||
* attribute value or the buffer used for an attr_list() call. Larger
|
||||
|
@ -61,46 +28,17 @@ struct xfs_attr_list_context;
|
|||
*/
|
||||
#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */
|
||||
|
||||
/*
|
||||
* Define how lists of attribute names are returned to the user from
|
||||
* the attr_list() call. A large, 32bit aligned, buffer is passed in
|
||||
* along with its size. We put an array of offsets at the top that each
|
||||
* reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
|
||||
*/
|
||||
typedef struct attrlist {
|
||||
__s32 al_count; /* number of entries in attrlist */
|
||||
__s32 al_more; /* T/F: more attrs (do call again) */
|
||||
__s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
|
||||
} attrlist_t;
|
||||
|
||||
/*
|
||||
* Show the interesting info about one attribute. This is what the
|
||||
* al_offset[i] entry points to.
|
||||
*/
|
||||
typedef struct attrlist_ent { /* data from attr_list() */
|
||||
__u32 a_valuelen; /* number bytes in value of attr */
|
||||
char a_name[1]; /* attr name (NULL terminated) */
|
||||
} attrlist_ent_t;
|
||||
|
||||
/*
|
||||
* Given a pointer to the (char*) buffer containing the attr_list() result,
|
||||
* and an index, return a pointer to the indicated attribute in the buffer.
|
||||
*/
|
||||
#define ATTR_ENTRY(buffer, index) \
|
||||
((attrlist_ent_t *) \
|
||||
&((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
|
||||
|
||||
/*
|
||||
* Kernel-internal version of the attrlist cursor.
|
||||
*/
|
||||
typedef struct attrlist_cursor_kern {
|
||||
struct xfs_attrlist_cursor_kern {
|
	__u32	hashval;	/* hash value of next entry to add */
	__u32	blkno;		/* block containing entry (suggestion) */
	__u32	offset;		/* offset in list of equal-hashvals */
	__u16	pad1;		/* padding to match user-level */
	__u8	pad2;		/* padding to match user-level */
	__u8	initted;	/* T/F: cursor has been initialized */
} attrlist_cursor_kern_t;
};

/*========================================================================

@@ -112,27 +50,28 @@ typedef struct attrlist_cursor_kern {
typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
			      unsigned char *, int, int);

typedef struct xfs_attr_list_context {
	struct xfs_trans		*tp;
	struct xfs_inode		*dp;		/* inode */
	struct attrlist_cursor_kern	*cursor;	/* position in list */
	char				*alist;		/* output buffer */
struct xfs_attr_list_context {
	struct xfs_trans		*tp;
	struct xfs_inode		*dp;		/* inode */
	struct xfs_attrlist_cursor_kern	cursor;		/* position in list */
	void				*buffer;	/* output buffer */

	/*
	 * Abort attribute list iteration if non-zero.  Can be used to pass
	 * error values to the xfs_attr_list caller.
	 */
	int				seen_enough;
	int				seen_enough;
	bool				allow_incomplete;

	ssize_t				count;		/* num used entries */
	int				dupcnt;		/* count dup hashvals seen */
	int				bufsize;	/* total buffer size */
	int				firstu;		/* first used byte in buffer */
	int				flags;		/* from VOP call */
	int				resynch;	/* T/F: resynch with cursor */
	put_listent_func_t		put_listent;	/* list output fmt function */
	int				index;		/* index into output buffer */
} xfs_attr_list_context_t;
	ssize_t				count;		/* num used entries */
	int				dupcnt;		/* count dup hashvals seen */
	int				bufsize;	/* total buffer size */
	int				firstu;		/* first used byte in buffer */
	unsigned int			attr_filter;	/* XFS_ATTR_{ROOT,SECURE} */
	int				resynch;	/* T/F: resynch with cursor */
	put_listent_func_t		put_listent;	/* list output fmt function */
	int				index;		/* index into output buffer */
};

/*========================================================================

@@ -143,21 +82,14 @@ typedef struct xfs_attr_list_context {
 * Overall external interface routines.
 */
int xfs_attr_inactive(struct xfs_inode *dp);
int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
int xfs_attr_list_int(struct xfs_attr_list_context *);
int xfs_attr_list_ilocked(struct xfs_attr_list_context *);
int xfs_attr_list(struct xfs_attr_list_context *);
int xfs_inode_hasattr(struct xfs_inode *ip);
int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
		 size_t namelen, unsigned char **value, int *valuelenp,
		 int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
		 size_t namelen, unsigned char *value, int valuelen, int flags);
int xfs_attr_get_ilocked(struct xfs_da_args *args);
int xfs_attr_get(struct xfs_da_args *args);
int xfs_attr_set(struct xfs_da_args *args);
int xfs_attr_set_args(struct xfs_da_args *args);
int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name,
		    size_t namelen, int flags);
int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
		  int flags, struct attrlist_cursor_kern *cursor);
bool xfs_attr_namecheck(const void *name, size_t length);

#endif /* __XFS_ATTR_H__ */
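To see how the consolidated interface is meant to be driven, here is a rough caller-side sketch (not part of the patch): it assumes the struct xfs_da_args fields used throughout this series (dp, name/namelen, value/valuelen, attr_filter) and abbreviates error handling.  Per the comment updates below, a zero valuelen asks only for the attribute's size.

int
example_attr_get(
	struct xfs_inode	*ip,
	const unsigned char	*name,
	void			*value,
	int			size)
{
	struct xfs_da_args	args = {
		.dp		= ip,		/* inode to query */
		.attr_filter	= 0,		/* 0 == user namespace (assumed) */
		.name		= name,
		.namelen	= strlen((const char *)name),
		.value		= value,
		.valuelen	= size,		/* 0 == return size only */
	};
	int			error;

	error = xfs_attr_get(&args);
	if (error)
		return error;
	return args.valuelen;			/* length actually found */
}
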
@@ -445,14 +445,25 @@ xfs_attr3_leaf_read(
 * Namespace helper routines
 *========================================================================*/

/*
 * If namespace bits don't match return 0.
 * If all match then return 1.
 */
STATIC int
xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
static bool
xfs_attr_match(
	struct xfs_da_args	*args,
	uint8_t			namelen,
	unsigned char		*name,
	int			flags)
{
	return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
	if (args->namelen != namelen)
		return false;
	if (memcmp(args->name, name, namelen) != 0)
		return false;
	/*
	 * If we are looking for incomplete entries, show only those, else only
	 * show complete entries.
	 */
	if (args->attr_filter !=
	    (flags & (XFS_ATTR_NSP_ONDISK_MASK | XFS_ATTR_INCOMPLETE)))
		return false;
	return true;
}

static int

@@ -464,7 +475,7 @@ xfs_attr_copy_value(
	/*
	 * No copy if all we have to do is get the length
	 */
	if (args->flags & ATTR_KERNOVAL) {
	if (!args->valuelen) {
		args->valuelen = valuelen;
		return 0;
	}

@@ -477,7 +488,7 @@ xfs_attr_copy_value(
		return -ERANGE;
	}

	if (args->op_flags & XFS_DA_OP_ALLOCVAL) {
	if (!args->value) {
		args->value = kmem_alloc_large(valuelen, 0);
		if (!args->value)
			return -ENOMEM;

@@ -526,7 +537,7 @@ xfs_attr_shortform_bytesfit(
	int			offset;

	/* rounded down */
	offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
	offset = (XFS_LITINO(mp) - bytes) >> 3;

	if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) {
		minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;

@@ -593,8 +604,7 @@ xfs_attr_shortform_bytesfit(
	minforkoff = roundup(minforkoff, 8) >> 3;

	/* attr fork btree root can have at least this many key/ptr pairs */
	maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
			XFS_BMDR_SPACE_CALC(MINABTPTRS);
	maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	maxforkoff = maxforkoff >> 3;	/* rounded down */

	if (offset >= maxforkoff)

@@ -678,15 +688,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
#ifdef DEBUG
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		ASSERT(0);
#endif
		ASSERT(!xfs_attr_match(args, sfe->namelen, sfe->nameval,
			sfe->flags));
	}

	offset = (char *)sfe - (char *)sf;

@@ -697,7 +700,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)

	sfe->namelen = args->namelen;
	sfe->valuelen = args->valuelen;
	sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	sfe->flags = args->attr_filter;
	memcpy(sfe->nameval, args->name, args->namelen);
	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
	sf->hdr.count++;

@@ -749,13 +752,9 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
	for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
					base += size, i++) {
		size = XFS_ATTR_SF_ENTSIZE(sfe);
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		break;
		if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
				sfe->flags))
			break;
	}
	if (i == end)
		return -ENOATTR;

@@ -816,13 +815,9 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		return -EEXIST;
		if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
				sfe->flags))
			return -EEXIST;
	}
	return -ENOATTR;
}

@@ -830,9 +825,9 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
/*
 * Retrieve the attribute value and length.
 *
 * If ATTR_KERNOVAL is specified, only the length needs to be returned.
 * Unlike a lookup, we only return an error if the attribute does not
 * exist or we can't retrieve the value.
 * If args->valuelen is zero, only the length needs to be returned.  Unlike a
 * lookup, we only return an error if the attribute does not exist or we can't
 * retrieve the value.
 */
int
xfs_attr_shortform_getvalue(

@@ -847,14 +842,10 @@ xfs_attr_shortform_getvalue(
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		return xfs_attr_copy_value(args, &sfe->nameval[args->namelen],
						sfe->valuelen);
		if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
				sfe->flags))
			return xfs_attr_copy_value(args,
				&sfe->nameval[args->namelen], sfe->valuelen);
	}
	return -ENOATTR;
}

@@ -918,7 +909,7 @@ xfs_attr_shortform_to_leaf(
		nargs.valuelen = sfe->valuelen;
		nargs.hashval = xfs_da_hashname(sfe->nameval,
						sfe->namelen);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
		nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
		error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
		ASSERT(error == -ENOATTR);
		error = xfs_attr3_leaf_add(bp, &nargs);

@@ -1124,7 +1115,7 @@ xfs_attr3_leaf_to_shortform(
		nargs.value = &name_loc->nameval[nargs.namelen];
		nargs.valuelen = be16_to_cpu(name_loc->valuelen);
		nargs.hashval = be32_to_cpu(entry->hashval);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
		nargs.attr_filter = entry->flags & XFS_ATTR_NSP_ONDISK_MASK;
		xfs_attr_shortform_add(&nargs, forkoff);
	}
	error = 0;

@@ -1449,8 +1440,9 @@ xfs_attr3_leaf_add_work(
	entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
				     ichdr->freemap[mapindex].size);
	entry->hashval = cpu_to_be32(args->hashval);
	entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
	entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	entry->flags = args->attr_filter;
	if (tmp)
		entry->flags |= XFS_ATTR_LOCAL;
	if (args->op_flags & XFS_DA_OP_RENAME) {
		entry->flags |= XFS_ATTR_INCOMPLETE;
		if ((args->blkno2 == args->blkno) &&

@@ -2346,7 +2338,7 @@ xfs_attr3_leaf_lookup_int(
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);
	if (ichdr.count >= args->geo->blksize / 8) {
		xfs_buf_corruption_error(bp);
		xfs_buf_mark_corrupt(bp);
		return -EFSCORRUPTED;
	}

@@ -2365,11 +2357,11 @@ xfs_attr3_leaf_lookup_int(
			break;
	}
	if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
		xfs_buf_corruption_error(bp);
		xfs_buf_mark_corrupt(bp);
		return -EFSCORRUPTED;
	}
	if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
		xfs_buf_corruption_error(bp);
		xfs_buf_mark_corrupt(bp);
		return -EFSCORRUPTED;
	}

@@ -2399,33 +2391,17 @@ xfs_attr3_leaf_lookup_int(
	/*
	 * GROT: Add code to remove incomplete entries.
	 */
	/*
	 * If we are looking for INCOMPLETE entries, show only those.
	 * If we are looking for complete entries, show only those.
	 */
	if (!!(args->op_flags & XFS_DA_OP_INCOMPLETE) !=
	    !!(entry->flags & XFS_ATTR_INCOMPLETE)) {
		continue;
	}
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf, probe);
		if (name_loc->namelen != args->namelen)
			continue;
		if (memcmp(args->name, name_loc->nameval,
				args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, entry->flags))
		if (!xfs_attr_match(args, name_loc->namelen,
				name_loc->nameval, entry->flags))
			continue;
		args->index = probe;
		return -EEXIST;
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
		if (name_rmt->namelen != args->namelen)
			continue;
		if (memcmp(args->name, name_rmt->name,
				args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, entry->flags))
		if (!xfs_attr_match(args, name_rmt->namelen,
				name_rmt->name, entry->flags))
			continue;
		args->index = probe;
		args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);

@@ -2444,9 +2420,9 @@ xfs_attr3_leaf_lookup_int(
 * Get the value associated with an attribute name from a leaf attribute
 * list structure.
 *
 * If ATTR_KERNOVAL is specified, only the length needs to be returned.
 * Unlike a lookup, we only return an error if the attribute does not
 * exist or we can't retrieve the value.
 * If args->valuelen is zero, only the length needs to be returned.  Unlike a
 * lookup, we only return an error if the attribute does not exist or we can't
 * retrieve the value.
 */
int
xfs_attr3_leaf_getvalue(
@@ -8,7 +8,6 @@
#define __XFS_ATTR_LEAF_H__

struct attrlist;
struct attrlist_cursor_kern;
struct xfs_attr_list_context;
struct xfs_da_args;
struct xfs_da_state;
@@ -397,7 +397,7 @@ xfs_attr_rmtval_get(

	trace_xfs_attr_rmtval_get(args);

	ASSERT(!(args->flags & ATTR_KERNOVAL));
	ASSERT(args->valuelen != 0);
	ASSERT(args->rmtvaluelen == args->valuelen);

	valuelen = args->rmtvaluelen;
@@ -193,14 +193,12 @@ xfs_default_attroffset(
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
	if (mp->m_sb.sb_inodesize == 256)
		offset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	else
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	ASSERT(offset < XFS_LITINO(mp));
	return offset;
}

@@ -690,7 +688,7 @@ xfs_bmap_extents_to_btree(
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */

@@ -727,7 +725,7 @@ xfs_bmap_extents_to_btree(
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_ino.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,

@@ -953,7 +951,7 @@ xfs_bmap_add_attrfork_btree(
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;

@@ -980,7 +978,7 @@ xfs_bmap_add_attrfork_extents(
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;

@@ -1178,13 +1176,13 @@ xfs_iread_bmbt_block(
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_private.b.whichfork;
	int			whichfork = cur->bc_ino.whichfork;

	block = xfs_btree_get_block(cur, level, &bp);

@@ -1528,7 +1526,7 @@ xfs_bmap_add_extent_delay_real(

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

@@ -1818,7 +1816,7 @@ xfs_bmap_add_extent_delay_real(
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;

@@ -1904,7 +1902,7 @@ xfs_bmap_add_extent_delay_real(
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;

@@ -2025,8 +2023,8 @@ xfs_bmap_add_extent_delay_real(
	xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_private.b.allocated;
		bma->cur->bc_private.b.allocated = 0;
		da_new += bma->cur->bc_ino.allocated;
		bma->cur->bc_ino.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */

@@ -2573,7 +2571,7 @@ xfs_bmap_add_extent_unwritten_real(

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		cur->bc_ino.allocated = 0;
		*curp = cur;
	}

@@ -2752,7 +2750,7 @@ xfs_bmap_add_extent_hole_real(
	struct xfs_bmbt_irec	old;

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
	ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

@@ -2955,7 +2953,7 @@ xfs_bmap_add_extent_hole_real(

	/* clear out the allocated field, done with it now in any case. */
	if (cur)
		cur->bc_private.b.allocated = 0;
		cur->bc_ino.allocated = 0;

	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:

@@ -4187,8 +4185,8 @@ xfs_bmapi_allocate(
	bma->nallocs++;

	if (bma->cur)
		bma->cur->bc_private.b.flags =
			bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
		bma->cur->bc_ino.flags =
			bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;

	bma->got.br_startoff = bma->offset;
	bma->got.br_startblock = bma->blkno;

@@ -4709,7 +4707,7 @@ xfs_bmapi_remap(

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		cur->bc_ino.flags = 0;
	}

	got.br_startoff = bno;

@@ -5364,7 +5362,7 @@ __xfs_bunmapi(
	if (ifp->if_flags & XFS_IFBROOT) {
		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		cur->bc_ino.flags = 0;
	} else
		cur = NULL;

@@ -5620,7 +5618,7 @@ __xfs_bunmapi(
	xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error)
			cur->bc_private.b.allocated = 0;
			cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;

@@ -5839,7 +5837,7 @@ xfs_bmap_collapse_extents(

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		cur->bc_ino.flags = 0;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {

@@ -5956,7 +5954,7 @@ xfs_bmap_insert_extents(

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		cur->bc_ino.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {

@@ -6025,8 +6023,8 @@ xfs_bmap_insert_extents(
 * @split_fsb is a block where the extents is split.  If split_fsb lies in a
 * hole or the first block of extents, just return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
int
xfs_bmap_split_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)

@@ -6074,7 +6072,7 @@ xfs_bmap_split_extent_at(

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.flags = 0;
		cur->bc_ino.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, &got, &i);
		if (error)
			goto del_cursor;

@@ -6133,7 +6131,7 @@ xfs_bmap_split_extent_at(

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}

@@ -6142,34 +6140,6 @@ xfs_bmap_split_extent_at(
	return error;
}

int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_trans_cancel(tp);
	return error;
}

/* Deferred mapping is only for real extents in the data fork. */
static bool
xfs_bmap_is_update_needed(
@@ -222,7 +222,8 @@ int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
int	xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
		bool *done, xfs_fileoff_t stop_fsb);
int	xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
int	xfs_bmap_split_extent(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t split_offset);
int	xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
		xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
		struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
@@ -166,13 +166,13 @@ xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);
			cur->bc_ino.ip, cur->bc_ino.whichfork);

	/*
	 * Copy the firstblock, dfops, and flags values,
	 * since init cursor doesn't get them.
	 */
	new->bc_private.b.flags = cur->bc_private.b.flags;
	new->bc_ino.flags = cur->bc_ino.flags;

	return new;
}

@@ -183,12 +183,12 @@ xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
	       (dst->bc_ino.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_ino.allocated += src->bc_ino.allocated;
	dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;

	src->bc_private.b.allocated = 0;
	src->bc_ino.allocated = 0;
}

STATIC int

@@ -205,8 +205,8 @@ xfs_bmbt_alloc_block(
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_tp->t_firstblock;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
			cur->bc_ino.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);

@@ -230,7 +230,7 @@ xfs_bmbt_alloc_block(
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;

@@ -259,10 +259,10 @@ xfs_bmbt_alloc_block(

	ASSERT(args.len == 1);
	cur->bc_tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
	cur->bc_ino.allocated++;
	cur->bc_ino.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

@@ -280,12 +280,12 @@ xfs_bmbt_free_block(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
	xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
	ip->i_d.di_nblocks--;

@@ -302,8 +302,8 @@ xfs_bmbt_get_minrecs(
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);
		ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
				    cur->bc_ino.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;

@@ -320,8 +320,8 @@ xfs_bmbt_get_maxrecs(
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);
		ifp = XFS_IFORK_PTR(cur->bc_ino.ip,
				    cur->bc_ino.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);

@@ -347,7 +347,7 @@ xfs_bmbt_get_dmaxrecs(
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
	return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
}

STATIC void

@@ -566,11 +566,11 @@ xfs_bmbt_init_cursor(
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;
	cur->bc_ino.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_ino.ip = ip;
	cur->bc_ino.allocated = 0;
	cur->bc_ino.flags = 0;
	cur->bc_ino.whichfork = whichfork;

	return cur;
}

@@ -644,7 +644,7 @@ xfs_bmbt_change_owner(
	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
	cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
@@ -20,6 +20,7 @@
#include "xfs_trace.h"
#include "xfs_alloc.h"
#include "xfs_log.h"
#include "xfs_btree_staging.h"

/*
 * Cursor allocation zone.

@@ -214,7 +215,7 @@ xfs_btree_check_sptr(
{
	if (level <= 0)
		return false;
	return xfs_verify_agbno(cur->bc_mp, cur->bc_private.a.agno, agbno);
	return xfs_verify_agbno(cur->bc_mp, cur->bc_ag.agno, agbno);
}

/*

@@ -234,8 +235,8 @@ xfs_btree_check_ptr(
			return 0;
		xfs_err(cur->bc_mp,
"Inode %llu fork %d: Corrupt btree %d pointer at level %d index %d.",
				cur->bc_private.b.ip->i_ino,
				cur->bc_private.b.whichfork, cur->bc_btnum,
				cur->bc_ino.ip->i_ino,
				cur->bc_ino.whichfork, cur->bc_btnum,
				level, index);
	} else {
		if (xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]),

@@ -243,7 +244,7 @@ xfs_btree_check_ptr(
			return 0;
		xfs_err(cur->bc_mp,
"AG %u: Corrupt btree %d pointer at level %d index %d.",
				cur->bc_private.a.agno, cur->bc_btnum,
				cur->bc_ag.agno, cur->bc_btnum,
				level, index);
	}

@@ -378,10 +379,12 @@ xfs_btree_del_cursor(
	 * allocated indirect blocks' accounting.
	 */
	ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
	       cur->bc_private.b.allocated == 0);
	       cur->bc_ino.allocated == 0);
	/*
	 * Free the cursor.
	 */
	if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
		kmem_free((void *)cur->bc_ops);
	kmem_cache_free(xfs_btree_cur_zone, cur);
}

@@ -642,6 +645,17 @@ xfs_btree_ptr_addr(
		((char *)block + xfs_btree_ptr_offset(cur, n, level));
}

struct xfs_ifork *
xfs_btree_ifork_ptr(
	struct xfs_btree_cur	*cur)
{
	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);

	if (cur->bc_flags & XFS_BTREE_STAGING)
		return cur->bc_ino.ifake->if_fork;
	return XFS_IFORK_PTR(cur->bc_ino.ip, cur->bc_ino.whichfork);
}

/*
 * Get the root block which is stored in the inode.
 *

@@ -652,9 +666,8 @@ STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
	struct xfs_btree_cur	*cur)
{
	struct xfs_ifork	*ifp;
	struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);

	ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
	return (struct xfs_btree_block *)ifp->if_broot;
}

@@ -881,13 +894,13 @@ xfs_btree_readahead_sblock(

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
				     left, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_ag.agno,
				     right, 1, cur->bc_ops->buf_ops);
		rval++;
	}

@@ -945,7 +958,7 @@ xfs_btree_ptr_to_daddr(
		*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
	} else {
		agbno = be32_to_cpu(ptr->s);
		*daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
		*daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_ag.agno,
				agbno);
	}

@@ -1014,7 +1027,7 @@ xfs_btree_ptr_is_null(
		return ptr->s == cpu_to_be32(NULLAGBLOCK);
}

STATIC void
void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)

@@ -1050,7 +1063,7 @@ xfs_btree_get_sibling(
	}
}

STATIC void
void
xfs_btree_set_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,

@@ -1128,7 +1141,7 @@ xfs_btree_init_block(
				 btnum, level, numrecs, owner, 0);
}

STATIC void
void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,

@@ -1144,9 +1157,9 @@ xfs_btree_init_block_cur(
	 * code.
	 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
		owner = cur->bc_ino.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;
		owner = cur->bc_ag.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 cur->bc_btnum, level, numrecs,

@@ -1220,7 +1233,7 @@ xfs_btree_set_refs(
	}
}

STATIC int
int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,

@@ -1280,7 +1293,7 @@ xfs_btree_read_buf_block(
/*
 * Copy keys from one btree block to another.
 */
STATIC void
void
xfs_btree_copy_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*dst_key,

@@ -1308,11 +1321,11 @@ xfs_btree_copy_recs(
/*
 * Copy block pointers from one btree block to another.
 */
STATIC void
void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*dst_ptr,
	union xfs_btree_ptr	*src_ptr,
	const union xfs_btree_ptr *src_ptr,
	int			numptrs)
{
	ASSERT(numptrs >= 0);

@@ -1393,8 +1406,8 @@ xfs_btree_log_keys(
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
		xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
				xfs_ilog_fbroot(cur->bc_ino.whichfork));
	}
}

@@ -1436,8 +1449,8 @@ xfs_btree_log_ptrs(
				  xfs_btree_ptr_offset(cur, first, level),
				  xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
		xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
				xfs_ilog_fbroot(cur->bc_ino.whichfork));
	}

}

@@ -1505,8 +1518,8 @@ xfs_btree_log_block(
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
		xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
			xfs_ilog_fbroot(cur->bc_ino.whichfork));
	}
}

@@ -1743,10 +1756,10 @@ xfs_btree_lookup_get_block(

	/* Check the inode owner since the verifiers don't. */
	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
	    !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_INVALID_OWNER) &&
	    !(cur->bc_ino.flags & XFS_BTCUR_BMBT_INVALID_OWNER) &&
	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
			cur->bc_private.b.ip->i_ino)
			cur->bc_ino.ip->i_ino)
		goto out_bad;

	/* Did we get the level we were looking for? */

@@ -1762,7 +1775,7 @@ xfs_btree_lookup_get_block(

out_bad:
	*blkp = NULL;
	xfs_buf_corruption_error(bp);
	xfs_buf_mark_corrupt(bp);
	xfs_trans_brelse(cur->bc_tp, bp);
	return -EFSCORRUPTED;
}

@@ -2938,9 +2951,9 @@ xfs_btree_new_iroot(

	xfs_btree_copy_ptrs(cur, pp, &nptr, 1);

	xfs_iroot_realloc(cur->bc_private.b.ip,
	xfs_iroot_realloc(cur->bc_ino.ip,
			  1 - xfs_btree_get_numrecs(cblock),
			  cur->bc_private.b.whichfork);
			  cur->bc_ino.whichfork);

	xfs_btree_setbuf(cur, level, cbp);

@@ -2953,7 +2966,7 @@ xfs_btree_new_iroot(
	xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));

	*logflags |=
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
	*stat = 1;
	return 0;
error0:

@@ -3105,11 +3118,11 @@ xfs_btree_make_block_unfull(

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_inode *ip = cur->bc_private.b.ip;
		struct xfs_inode *ip = cur->bc_ino.ip;

		if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
			/* A root block that can be made bigger. */
			xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
			xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork);
			*stat = 1;
		} else {
			/* A root block that needs replacing */

@@ -3455,8 +3468,8 @@ STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{
	int			whichfork = cur->bc_private.b.whichfork;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*cblock;

@@ -3514,8 +3527,8 @@ xfs_btree_kill_iroot(

	index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
	if (index) {
		xfs_iroot_realloc(cur->bc_private.b.ip, index,
				  cur->bc_private.b.whichfork);
		xfs_iroot_realloc(cur->bc_ino.ip, index,
				  cur->bc_ino.whichfork);
		block = ifp->if_broot;
	}

@@ -3544,7 +3557,7 @@ xfs_btree_kill_iroot(
	cur->bc_bufs[level - 1] = NULL;
	be16_add_cpu(&block->bb_level, -1);
	xfs_trans_log_inode(cur->bc_tp, ip,
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
	cur->bc_nlevels--;
out0:
	return 0;

@@ -3712,8 +3725,8 @@ xfs_btree_delrec(
	 */
	if (level == cur->bc_nlevels - 1) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			xfs_iroot_realloc(cur->bc_private.b.ip, -1,
					  cur->bc_private.b.whichfork);
			xfs_iroot_realloc(cur->bc_ino.ip, -1,
					  cur->bc_ino.whichfork);

			error = xfs_btree_kill_iroot(cur);
			if (error)
@@ -10,6 +10,7 @@ struct xfs_buf;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
struct xfs_ifork;

extern kmem_zone_t	*xfs_btree_cur_zone;

@@ -177,15 +178,37 @@ union xfs_btree_irec {
	struct xfs_refcount_irec	rc;
};

/* Per-AG btree private information. */
union xfs_btree_cur_private {
	struct {
		unsigned long	nr_ops;		/* # record updates */
		int		shape_changes;	/* # of extent splits */
	} refc;
	struct {
		bool		active;		/* allocation cursor state */
	} abt;
/* Per-AG btree information. */
struct xfs_btree_cur_ag {
	union {
		struct xfs_buf		*agbp;
		struct xbtree_afakeroot	*afake;	/* for staging cursor */
	};
	xfs_agnumber_t			agno;
	union {
		struct {
			unsigned long	nr_ops;		/* # record updates */
			int		shape_changes;	/* # of extent splits */
		} refc;
		struct {
			bool		active;		/* allocation cursor state */
		} abt;
	};
};

/* Btree-in-inode cursor information */
struct xfs_btree_cur_ino {
	struct xfs_inode		*ip;
	struct xbtree_ifakeroot		*ifake;	/* for staging cursor */
	int				allocated;
	short				forksize;
	char				whichfork;
	char				flags;
/* We are converting a delalloc reservation */
#define	XFS_BTCUR_BMBT_WASDEL	(1 << 0)

/* For extent swap, ignore owner check in verifier */
#define	XFS_BTCUR_BMBT_INVALID_OWNER	(1 << 1)
};

/*

@@ -209,21 +232,9 @@ typedef struct xfs_btree_cur
	xfs_btnum_t	bc_btnum;	/* identifies which btree type */
	int		bc_statoff;	/* offset of btre stats array */
	union {
		struct {			/* needed for BNO, CNT, INO */
			struct xfs_buf	*agbp;	/* agf/agi buffer pointer */
			xfs_agnumber_t	agno;	/* ag number */
			union xfs_btree_cur_private	priv;
		} a;
		struct {			/* needed for BMAP */
			struct xfs_inode *ip;	/* pointer to our inode */
			int		allocated;	/* count of alloced */
			short		forksize;	/* fork's inode space */
			char		whichfork;	/* data or attr fork */
			char		flags;		/* flags */
#define	XFS_BTCUR_BPRV_WASDEL		(1<<0)		/* was delayed */
#define	XFS_BTCUR_BPRV_INVALID_OWNER	(1<<1)		/* for ext swap */
		} b;
	} bc_private;	/* per-btree type data */
	union {
		struct xfs_btree_cur_ag	bc_ag;
		struct xfs_btree_cur_ino bc_ino;
	};
} xfs_btree_cur_t;

/* cursor flags */

@@ -232,6 +243,12 @@ typedef struct xfs_btree_cur
#define XFS_BTREE_LASTREC_UPDATE	(1<<2)	/* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS		(1<<3)	/* uses extended btree blocks */
#define XFS_BTREE_OVERLAPPING		(1<<4)	/* overlapping intervals */
/*
 * The root of this btree is a fakeroot structure so that we can stage a btree
 * rebuild without leaving it accessible via primary metadata.  The ops struct
 * is dynamically allocated and must be freed when the cursor is deleted.
 */
#define XFS_BTREE_STAGING		(1<<5)

#define	XFS_BTREE_NOERROR	0

@@ -494,6 +511,7 @@ union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
		union xfs_btree_irec *high, bool *exists);
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);

/* Does this cursor point to the last block in the given level? */
static inline bool

@@ -512,4 +530,20 @@ xfs_btree_islastblock(
	return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}

void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr);
int xfs_btree_get_buf_block(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr,
		struct xfs_btree_block **block, struct xfs_buf **bpp);
void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, union xfs_btree_ptr *ptr,
		int lr);
void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
		struct xfs_buf *bp, int level, int numrecs);
void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *dst_ptr,
		const union xfs_btree_ptr *src_ptr, int numptrs);
void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
		union xfs_btree_key *dst_key, union xfs_btree_key *src_key,
		int numkeys);

#endif	/* __XFS_BTREE_H__ */
@@ -0,0 +1,879 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_btree_staging.h"

/*
 * Staging Cursors and Fake Roots for Btrees
 * =========================================
 *
 * A staging btree cursor is a special type of btree cursor that callers must
 * use to construct a new btree index using the btree bulk loader code.  The
 * bulk loading code uses the staging btree cursor to abstract the details of
 * initializing new btree blocks and filling them with records or key/ptr
 * pairs.  Regular btree operations (e.g. queries and modifications) are not
 * supported with staging cursors, and callers must not invoke them.
 *
 * Fake root structures contain all the information about a btree that is under
 * construction by the bulk loading code.  Staging btree cursors point to fake
 * root structures instead of the usual AG header or inode structure.
 *
 * Callers are expected to initialize a fake root structure and pass it into
 * the _stage_cursor function for a specific btree type.  When bulk loading is
 * complete, callers should call the _commit_staged_btree function for that
 * specific btree type to commit the new btree into the filesystem.
 */
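Taken together, a caller might drive a staged rebuild roughly like this sketch; example_stage_cursor stands in for a per-btree _stage_cursor helper, and bbl is a filled-out struct xfs_btree_bload with claim_block/get_record callbacks (none of these caller-side names are defined by this patch):

	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_cur	*cur;
	int			error;

	cur = example_stage_cursor(mp, &afake, agno);
	error = xfs_btree_bload_compute_geometry(cur, &bbl, nr_records);
	if (!error)
		error = xfs_btree_bload(cur, &bbl, priv);	/* fill the blocks */
	/* log the new root, then swap the staged tree in: */
	xfs_btree_commit_afakeroot(cur, tp, agbp, ops);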

/*
 * Don't allow staging cursors to be duplicated because they're supposed to be
 * kept private to a single thread.
 */
STATIC struct xfs_btree_cur *
xfs_btree_fakeroot_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	ASSERT(0);
	return NULL;
}

/*
 * Don't allow block allocation for a staging cursor, because staging cursors
 * do not support regular btree modifications.
 *
 * Bulk loading uses a separate callback to obtain new blocks from a
 * preallocated list, which prevents ENOSPC failures during loading.
 */
STATIC int
xfs_btree_fakeroot_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start_bno,
	union xfs_btree_ptr	*new_bno,
	int			*stat)
{
	ASSERT(0);
	return -EFSCORRUPTED;
}

/*
 * Don't allow block freeing for a staging cursor, because staging cursors
 * do not support regular btree modifications.
 */
STATIC int
xfs_btree_fakeroot_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Initialize a pointer to the root block from the fakeroot. */
STATIC void
xfs_btree_fakeroot_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xbtree_afakeroot	*afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	afake = cur->bc_ag.afake;
	ptr->s = cpu_to_be32(afake->af_root);
}

/*
 * Bulk Loading for AG Btrees
 * ==========================
 *
 * For a btree rooted in an AG header, pass a xbtree_afakeroot structure to the
 * staging cursor.  Callers should initialize this to zero.
 *
 * The _stage_cursor() function for a specific btree type should call
 * xfs_btree_stage_afakeroot to set up the in-memory cursor as a staging
 * cursor.  The corresponding _commit_staged_btree() function should log the
 * new root and call xfs_btree_commit_afakeroot() to transform the staging
 * cursor into a regular btree cursor.
 */
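A hypothetical per-btree wrapper pair following that convention (the example_* names and ops structure are illustrative, not from this patch):

	struct xfs_btree_cur *
	example_btree_stage_cursor(
		struct xfs_mount	*mp,
		struct xbtree_afakeroot	*afake,
		xfs_agnumber_t		agno)
	{
		struct xfs_btree_cur	*cur;

		cur = example_btree_init_common(mp, NULL, agno); /* no transaction */
		xfs_btree_stage_afakeroot(cur, afake);
		return cur;
	}

	void
	example_btree_commit_staged(
		struct xfs_btree_cur	*cur,
		struct xfs_trans	*tp,
		struct xfs_buf		*agbp)
	{
		/* the caller must already have logged the new root */
		xfs_btree_commit_afakeroot(cur, tp, agbp, &example_btree_real_ops);
	}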

/* Update the btree root information for a per-AG fake root. */
STATIC void
xfs_btree_afakeroot_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	afake->af_root = be32_to_cpu(ptr->s);
	afake->af_levels += inc;
}

/*
 * Initialize a AG-rooted btree cursor with the given AG btree fake root.
 * The btree cursor's bc_ops will be overridden as needed to make the staging
 * functionality work.
 */
void
xfs_btree_stage_afakeroot(
	struct xfs_btree_cur	*cur,
	struct xbtree_afakeroot	*afake)
{
	struct xfs_btree_ops	*nops;

	ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
	ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE));
	ASSERT(cur->bc_tp == NULL);

	nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
	memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
	nops->alloc_block = xfs_btree_fakeroot_alloc_block;
	nops->free_block = xfs_btree_fakeroot_free_block;
	nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
	nops->set_root = xfs_btree_afakeroot_set_root;
	nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;

	cur->bc_ag.afake = afake;
	cur->bc_nlevels = afake->af_levels;
	cur->bc_ops = nops;
	cur->bc_flags |= XFS_BTREE_STAGING;
}

/*
 * Transform an AG-rooted staging btree cursor back into a regular cursor by
 * substituting a real btree root for the fake one and restoring normal btree
 * cursor ops.  The caller must log the btree root change prior to calling
 * this.
 */
void
xfs_btree_commit_afakeroot(
	struct xfs_btree_cur		*cur,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	const struct xfs_btree_ops	*ops)
{
	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(cur->bc_tp == NULL);

	trace_xfs_btree_commit_afakeroot(cur);

	kmem_free((void *)cur->bc_ops);
	cur->bc_ag.agbp = agbp;
	cur->bc_ops = ops;
	cur->bc_flags &= ~XFS_BTREE_STAGING;
	cur->bc_tp = tp;
}

/*
 * Bulk Loading for Inode-Rooted Btrees
 * ====================================
 *
 * For a btree rooted in an inode fork, pass a xbtree_ifakeroot structure to
 * the staging cursor.  This structure should be initialized as follows:
 *
 * - if_fork_size field should be set to the number of bytes available to the
 *   fork in the inode.
 *
 * - if_fork should point to a freshly allocated struct xfs_ifork.
 *
 * - if_format should be set to the appropriate fork type (e.g.
 *   XFS_DINODE_FMT_BTREE).
 *
 * All other fields must be zero.
 *
 * The _stage_cursor() function for a specific btree type should call
 * xfs_btree_stage_ifakeroot to set up the in-memory cursor as a staging
 * cursor.  The corresponding _commit_staged_btree() function should log the
 * new root and call xfs_btree_commit_ifakeroot() to transform the staging
 * cursor into a regular btree cursor.
 */
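Those rules translate into roughly this setup sketch (the surrounding ip/whichfork variables are assumed, and the allocation call is illustrative):

	struct xbtree_ifakeroot	ifake = {
		.if_fork	= kmem_zalloc(sizeof(struct xfs_ifork), KM_NOFS),
		.if_fork_size	= XFS_IFORK_SIZE(ip, whichfork),
		.if_format	= XFS_DINODE_FMT_BTREE,
		/* all other fields stay zero, per the rules above */
	};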

/*
 * Initialize an inode-rooted btree cursor with the given inode btree fake
 * root.  The btree cursor's bc_ops will be overridden as needed to make the
 * staging functionality work.  If new_ops is not NULL, these new ops will be
 * passed out to the caller for further overriding.
 */
void
xfs_btree_stage_ifakeroot(
	struct xfs_btree_cur		*cur,
	struct xbtree_ifakeroot		*ifake,
	struct xfs_btree_ops		**new_ops)
{
	struct xfs_btree_ops		*nops;

	ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_tp == NULL);

	nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
	memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
	nops->alloc_block = xfs_btree_fakeroot_alloc_block;
	nops->free_block = xfs_btree_fakeroot_free_block;
	nops->init_ptr_from_cur = xfs_btree_fakeroot_init_ptr_from_cur;
	nops->dup_cursor = xfs_btree_fakeroot_dup_cursor;

	cur->bc_ino.ifake = ifake;
	cur->bc_nlevels = ifake->if_levels;
	cur->bc_ops = nops;
	cur->bc_flags |= XFS_BTREE_STAGING;

	if (new_ops)
		*new_ops = nops;
}

/*
 * Transform an inode-rooted staging btree cursor back into a regular cursor by
 * substituting a real btree root for the fake one and restoring normal btree
 * cursor ops.  The caller must log the btree root change prior to calling
 * this.
 */
void
xfs_btree_commit_ifakeroot(
	struct xfs_btree_cur		*cur,
	struct xfs_trans		*tp,
	int				whichfork,
	const struct xfs_btree_ops	*ops)
{
	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
	ASSERT(cur->bc_tp == NULL);

	trace_xfs_btree_commit_ifakeroot(cur);

	kmem_free((void *)cur->bc_ops);
	cur->bc_ino.ifake = NULL;
	cur->bc_ino.whichfork = whichfork;
	cur->bc_ops = ops;
	cur->bc_flags &= ~XFS_BTREE_STAGING;
	cur->bc_tp = tp;
}

/*
 * Bulk Loading of Staged Btrees
 * =============================
 *
 * This interface is used with a staged btree cursor to create a totally new
 * btree with a large number of records (i.e. more than what would fit in a
 * single root block).  When the creation is complete, the new root can be
 * linked atomically into the filesystem by committing the staged cursor.
 *
 * Creation of a new btree proceeds roughly as follows:
 *
 * The first step is to initialize an appropriate fake btree root structure and
 * then construct a staged btree cursor.  Refer to the block comments about
 * "Bulk Loading for AG Btrees" and "Bulk Loading for Inode-Rooted Btrees" for
 * more information about how to do this.
 *
 * The second step is to initialize a struct xfs_btree_bload context as
 * documented in the structure definition.
 *
 * The third step is to call xfs_btree_bload_compute_geometry to compute the
 * height of and the number of blocks needed to construct the btree.  See the
 * section "Computing the Geometry of the New Btree" for details about this
 * computation.
 *
 * In step four, the caller must allocate xfs_btree_bload.nr_blocks blocks and
 * save them for later use by ->claim_block().  Bulk loading requires all
 * blocks to be allocated beforehand to avoid ENOSPC failures midway through a
 * rebuild, and to minimize seek distances of the new btree.
 *
 * Step five is to call xfs_btree_bload() to start constructing the btree.
 *
 * The final step is to commit the staging btree cursor, which logs the new
 * btree root and turns the staging cursor into a regular cursor.  The caller
 * is responsible for cleaning up the previous btree blocks, if any.
 *
 * Computing the Geometry of the New Btree
 * =======================================
 *
 * The number of items placed in each btree block is computed via the following
 * algorithm: For leaf levels, the number of items for the level is nr_records
 * in the bload structure.  For node levels, the number of items for the level
 * is the number of blocks in the next lower level of the tree.  For each
 * level, the desired number of items per block is defined as:
 *
 * desired = max(minrecs, maxrecs - slack factor)
 *
 * The number of blocks for the level is defined to be:
 *
 * blocks = floor(nr_items / desired)
 *
 * Note this is rounded down so that the npb calculation below will never fall
 * below minrecs.  The number of items that will actually be loaded into each
 * btree block is defined as:
 *
 * npb = nr_items / blocks
 *
 * Some of the leftmost blocks in the level will contain one extra record as
 * needed to handle uneven division.  If the number of records in any block
 * would exceed maxrecs for that level, blocks is incremented and npb is
 * recalculated.
 *
 * In other words, we compute the number of blocks needed to satisfy a given
 * loading level, then spread the items as evenly as possible.
 *
 * The height and number of fs blocks required to create the btree are computed
 * and returned via btree_height and nr_blocks.
 */
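As a worked example of those formulas (illustrative numbers only): with nr_items = 1000, minrecs = 50, maxrecs = 100, and a slack factor of 10, desired = max(50, 100 - 10) = 90, blocks = floor(1000 / 90) = 11, and npb = 1000 / 11 = 90 with remainder 10, so the ten leftmost blocks hold 91 items and the remaining block holds 90.  In C, the per-level computation described above looks roughly like this (a sketch, not code from the patch; it assumes nr_items >= desired so blocks is nonzero):

	/* Illustrative helper: per-level geometry per the algorithm above. */
	static unsigned int
	example_level_geometry(
		uint64_t	nr_items,
		unsigned int	minrecs,
		unsigned int	maxrecs,
		unsigned int	slack,
		uint64_t	*nr_blocks)
	{
		unsigned int	desired = max(minrecs, maxrecs - slack);
		uint64_t	blocks = nr_items / desired;	/* rounded down */
		unsigned int	npb = nr_items / blocks;

		/* the leftmost (nr_items % blocks) blocks hold one extra item */
		if (nr_items % blocks && npb + 1 > maxrecs) {
			blocks++;
			npb = nr_items / blocks;
		}
		*nr_blocks = blocks;
		return npb;
	}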

/*
 * Put a btree block that we're loading onto the ordered list and release it.
 * The btree blocks will be written to disk when bulk loading is finished.
 */
static void
xfs_btree_bload_drop_buf(
	struct list_head	*buffers_list,
	struct xfs_buf		**bpp)
{
	if (*bpp == NULL)
		return;

	if (!xfs_buf_delwri_queue(*bpp, buffers_list))
		ASSERT(0);

	xfs_buf_relse(*bpp);
	*bpp = NULL;
}

/*
 * Allocate and initialize one btree block for bulk loading.
 *
 * The new btree block will have its level and numrecs fields set to the values
 * of the level and nr_this_block parameters, respectively.
 *
 * The caller should ensure that ptrp, bpp, and blockp refer to the left
 * sibling of the new block, if there is any.  On exit, ptrp, bpp, and blockp
 * will all point to the new block.
 */
STATIC int
xfs_btree_bload_prep_block(
	struct xfs_btree_cur		*cur,
	struct xfs_btree_bload		*bbl,
	struct list_head		*buffers_list,
	unsigned int			level,
	unsigned int			nr_this_block,
	union xfs_btree_ptr		*ptrp, /* in/out */
	struct xfs_buf			**bpp, /* in/out */
	struct xfs_btree_block		**blockp, /* in/out */
	void				*priv)
{
	union xfs_btree_ptr		new_ptr;
	struct xfs_buf			*new_bp;
	struct xfs_btree_block		*new_block;
	int				ret;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp = xfs_btree_ifork_ptr(cur);
		size_t			new_size;

		ASSERT(*bpp == NULL);

		/* Allocate a new incore btree root block. */
		new_size = bbl->iroot_size(cur, nr_this_block, priv);
		ifp->if_broot = kmem_zalloc(new_size, 0);
		ifp->if_broot_bytes = (int)new_size;
		ifp->if_flags |= XFS_IFBROOT;

		/* Initialize it and send it out. */
		xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
				XFS_BUF_DADDR_NULL, cur->bc_btnum, level,
				nr_this_block, cur->bc_ino.ip->i_ino,
				cur->bc_flags);

		*bpp = NULL;
		*blockp = ifp->if_broot;
		xfs_btree_set_ptr_null(cur, ptrp);
		return 0;
	}

	/* Claim one of the caller's preallocated blocks. */
	xfs_btree_set_ptr_null(cur, &new_ptr);
	ret = bbl->claim_block(cur, &new_ptr, priv);
	if (ret)
		return ret;

	ASSERT(!xfs_btree_ptr_is_null(cur, &new_ptr));

	ret = xfs_btree_get_buf_block(cur, &new_ptr, &new_block, &new_bp);
	if (ret)
		return ret;

	/*
	 * The previous block (if any) is the left sibling of the new block,
	 * so set its right sibling pointer to the new block and drop it.
	 */
	if (*blockp)
		xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
	xfs_btree_bload_drop_buf(buffers_list, bpp);

	/* Initialize the new btree block. */
	xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
	xfs_btree_set_sibling(cur, new_block, ptrp, XFS_BB_LEFTSIB);

	/* Set the out parameters. */
	*bpp = new_bp;
	*blockp = new_block;
	xfs_btree_copy_ptrs(cur, ptrp, &new_ptr, 1);
	return 0;
}

/* Load one leaf block. */
STATIC int
xfs_btree_bload_leaf(
	struct xfs_btree_cur		*cur,
	unsigned int			recs_this_block,
	xfs_btree_bload_get_record_fn	get_record,
	struct xfs_btree_block		*block,
	void				*priv)
{
	unsigned int			j;
	int				ret;

	/* Fill the leaf block with records. */
	for (j = 1; j <= recs_this_block; j++) {
		union xfs_btree_rec	*block_rec;

		ret = get_record(cur, priv);
		if (ret)
			return ret;
		block_rec = xfs_btree_rec_addr(cur, j, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return 0;
}

/*
 * Load one node block with key/ptr pairs.
 *
 * child_ptr must point to a block within the next level down in the tree.  A
 * key/ptr entry will be created in the new node block to the block pointed to
 * by child_ptr.  On exit, child_ptr points to the next block on the child
 * level that needs processing.
 */
STATIC int
xfs_btree_bload_node(
	struct xfs_btree_cur	*cur,
	unsigned int		recs_this_block,
	union xfs_btree_ptr	*child_ptr,
	struct xfs_btree_block	*block)
{
	unsigned int		j;
	int			ret;

	/* Fill the node block with keys and pointers. */
	for (j = 1; j <= recs_this_block; j++) {
		union xfs_btree_key	child_key;
		union xfs_btree_ptr	*block_ptr;
		union xfs_btree_key	*block_key;
		struct xfs_btree_block	*child_block;
		struct xfs_buf		*child_bp;

		ASSERT(!xfs_btree_ptr_is_null(cur, child_ptr));

		ret = xfs_btree_get_buf_block(cur, child_ptr, &child_block,
				&child_bp);
		if (ret)
			return ret;

		block_ptr = xfs_btree_ptr_addr(cur, j, block);
		xfs_btree_copy_ptrs(cur, block_ptr, child_ptr, 1);

		block_key = xfs_btree_key_addr(cur, j, block);
		xfs_btree_get_keys(cur, child_block, &child_key);
		xfs_btree_copy_keys(cur, block_key, &child_key, 1);

		xfs_btree_get_sibling(cur, child_block, child_ptr,
				XFS_BB_RIGHTSIB);
		xfs_buf_relse(child_bp);
	}

	return 0;
}

/*
 * Compute the maximum number of records (or keyptrs) per block that we want to
 * install at this level in the btree.  Caller is responsible for having set
 * @cur->bc_ino.forksize to the desired fork size, if appropriate.
 */
STATIC unsigned int
xfs_btree_bload_max_npb(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	unsigned int		level)
{
	unsigned int		ret;

	if (level == cur->bc_nlevels - 1 && cur->bc_ops->get_dmaxrecs)
		return cur->bc_ops->get_dmaxrecs(cur, level);

	ret = cur->bc_ops->get_maxrecs(cur, level);
	if (level == 0)
		ret -= bbl->leaf_slack;
	else
		ret -= bbl->node_slack;
	return ret;
}

/*
 * Compute the desired number of records (or keyptrs) per block that we want to
 * install at this level in the btree, which must be somewhere between minrecs
 * and max_npb.  The caller is free to install fewer records per block.
 */
STATIC unsigned int
xfs_btree_bload_desired_npb(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	unsigned int		level)
{
	unsigned int		npb = xfs_btree_bload_max_npb(cur, bbl, level);
|
||||
|
||||
/* Root blocks are not subject to minrecs rules. */
|
||||
if (level == cur->bc_nlevels - 1)
|
||||
return max(1U, npb);
|
||||
|
||||
return max_t(unsigned int, cur->bc_ops->get_minrecs(cur, level), npb);
|
||||
}
|
||||
|
||||
/*
|
||||
* Compute the number of records to be stored in each block at this level and
|
||||
* the number of blocks for this level. For leaf levels, we must populate an
|
||||
* empty root block even if there are no records, so we have to have at least
|
||||
* one block.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_btree_bload_level_geometry(
|
||||
struct xfs_btree_cur *cur,
|
||||
struct xfs_btree_bload *bbl,
|
||||
unsigned int level,
|
||||
uint64_t nr_this_level,
|
||||
unsigned int *avg_per_block,
|
||||
uint64_t *blocks,
|
||||
uint64_t *blocks_with_extra)
|
||||
{
|
||||
uint64_t npb;
|
||||
uint64_t dontcare;
|
||||
unsigned int desired_npb;
|
||||
unsigned int maxnr;
|
||||
|
||||
maxnr = cur->bc_ops->get_maxrecs(cur, level);
|
||||
|
||||
/*
|
||||
* Compute the number of blocks we need to fill each block with the
|
||||
* desired number of records/keyptrs per block. Because desired_npb
|
||||
* could be minrecs, we use regular integer division (which rounds
|
||||
* the block count down) so that in the next step the effective # of
|
||||
* items per block will never be less than desired_npb.
|
||||
*/
|
||||
desired_npb = xfs_btree_bload_desired_npb(cur, bbl, level);
|
||||
*blocks = div64_u64_rem(nr_this_level, desired_npb, &dontcare);
|
||||
*blocks = max(1ULL, *blocks);
|
||||
|
||||
/*
|
||||
* Compute the number of records that we will actually put in each
|
||||
* block, assuming that we want to spread the records evenly between
|
||||
* the blocks. Take care that the effective # of items per block (npb)
|
||||
* won't exceed maxrecs even for the blocks that get an extra record,
|
||||
* since desired_npb could be maxrecs, and in the previous step we
|
||||
* rounded the block count down.
|
||||
*/
|
||||
npb = div64_u64_rem(nr_this_level, *blocks, blocks_with_extra);
|
||||
if (npb > maxnr || (npb == maxnr && *blocks_with_extra > 0)) {
|
||||
(*blocks)++;
|
||||
npb = div64_u64_rem(nr_this_level, *blocks, blocks_with_extra);
|
||||
}
|
||||
|
||||
*avg_per_block = min_t(uint64_t, npb, nr_this_level);
|
||||
|
||||
trace_xfs_btree_bload_level_geometry(cur, level, nr_this_level,
|
||||
*avg_per_block, desired_npb, *blocks,
|
||||
*blocks_with_extra);
|
||||
}
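
/*
 * Worked example of the two-pass division above (hypothetical numbers,
 * added for illustration): suppose nr_this_level = 1000 and
 * desired_npb = 75.  The first division gives blocks = 1000 / 75 = 13
 * (rounded down).  The second division spreads the records: npb =
 * 1000 / 13 = 76 with blocks_with_extra = 12, i.e. twelve blocks hold
 * 77 records and one holds 76 -- every block gets at least desired_npb
 * and no more than maxrecs (assuming maxrecs >= 77 here).
 */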

/*
 * Ensure a slack value is appropriate for the btree.
 *
 * If the slack value is negative, set slack so that we fill the block to
 * halfway between minrecs and maxrecs.  Make sure the slack is never so large
 * that we can underflow minrecs.
 */
static void
xfs_btree_bload_ensure_slack(
	struct xfs_btree_cur	*cur,
	int			*slack,
	int			level)
{
	int			maxr;
	int			minr;

	maxr = cur->bc_ops->get_maxrecs(cur, level);
	minr = cur->bc_ops->get_minrecs(cur, level);

	/*
	 * If slack is negative, automatically set slack so that we load the
	 * btree block approximately halfway between minrecs and maxrecs.
	 * Generally, this will net us 75% loading.
	 */
	if (*slack < 0)
		*slack = maxr - ((maxr + minr) >> 1);

	*slack = min(*slack, maxr - minr);
}
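
/*
 * Example of the default (hypothetical numbers, added for illustration):
 * with maxrecs = 100 and minrecs = 50, a negative slack becomes
 * 100 - ((100 + 50) >> 1) = 25, so blocks are filled to 75 of 100 slots:
 * 75% loading, which leaves room for inserts before the first split.
 */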

/*
 * Prepare a btree cursor for a bulk load operation by computing the geometry
 * fields in bbl.  Caller must ensure that the btree cursor is a staging
 * cursor.  This function can be called multiple times.
 */
int
xfs_btree_bload_compute_geometry(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	uint64_t		nr_records)
{
	uint64_t		nr_blocks = 0;
	uint64_t		nr_this_level;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	/*
	 * Make sure that the slack values make sense for traditional leaf and
	 * node blocks.  Inode-rooted btrees will return different minrecs and
	 * maxrecs values for the root block (bc_nlevels == level - 1).  We're
	 * checking levels 0 and 1 here, so set bc_nlevels such that the btree
	 * code doesn't interpret either as the root level.
	 */
	cur->bc_nlevels = XFS_BTREE_MAXLEVELS - 1;
	xfs_btree_bload_ensure_slack(cur, &bbl->leaf_slack, 0);
	xfs_btree_bload_ensure_slack(cur, &bbl->node_slack, 1);

	bbl->nr_records = nr_this_level = nr_records;
	for (cur->bc_nlevels = 1; cur->bc_nlevels < XFS_BTREE_MAXLEVELS;) {
		uint64_t	level_blocks;
		uint64_t	dontcare64;
		unsigned int	level = cur->bc_nlevels - 1;
		unsigned int	avg_per_block;

		xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
				&avg_per_block, &level_blocks, &dontcare64);

		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			/*
			 * If all the items we want to store at this level
			 * would fit in the inode root block, then we have our
			 * btree root and are done.
			 *
			 * Note that bmap btrees forbid records in the root.
			 */
			if (level != 0 && nr_this_level <= avg_per_block) {
				nr_blocks++;
				break;
			}

			/*
			 * Otherwise, we have to store all the items for this
			 * level in traditional btree blocks and therefore need
			 * another level of btree to point to those blocks.
			 *
			 * We have to re-compute the geometry for each level of
			 * an inode-rooted btree because the geometry differs
			 * between a btree root in an inode fork and a
			 * traditional btree block.
			 *
			 * This distinction is made in the btree code based on
			 * whether level == bc_nlevels - 1.  Based on the
			 * previous root block size check against the root
			 * block geometry, we know that we aren't yet ready to
			 * populate the root.  Increment bc_nlevels and
			 * recalculate the geometry for a traditional
			 * block-based btree level.
			 */
			cur->bc_nlevels++;
			xfs_btree_bload_level_geometry(cur, bbl, level,
					nr_this_level, &avg_per_block,
					&level_blocks, &dontcare64);
		} else {
			/*
			 * If all the items we want to store at this level
			 * would fit in a single root block, we're done.
			 */
			if (nr_this_level <= avg_per_block) {
				nr_blocks++;
				break;
			}

			/* Otherwise, we need another level of btree. */
			cur->bc_nlevels++;
		}

		nr_blocks += level_blocks;
		nr_this_level = level_blocks;
	}

	if (cur->bc_nlevels == XFS_BTREE_MAXLEVELS)
		return -EOVERFLOW;

	bbl->btree_height = cur->bc_nlevels;
	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
		bbl->nr_blocks = nr_blocks - 1;
	else
		bbl->nr_blocks = nr_blocks;
	return 0;
}
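
/*
 * End-to-end example (hypothetical numbers, added for illustration):
 * loading 1,000 records into an AG-rooted btree whose blocks target 75
 * records yields 13 leaf blocks at level 0.  The 13 leaf pointers then
 * fit in a single root block, so the loop stops with btree_height = 2
 * and nr_blocks = 14.
 */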

/* Bulk load a btree given the parameters and geometry established in bbl. */
int
xfs_btree_bload(
	struct xfs_btree_cur		*cur,
	struct xfs_btree_bload		*bbl,
	void				*priv)
{
	struct list_head		buffers_list;
	union xfs_btree_ptr		child_ptr;
	union xfs_btree_ptr		ptr;
	struct xfs_buf			*bp = NULL;
	struct xfs_btree_block		*block = NULL;
	uint64_t			nr_this_level = bbl->nr_records;
	uint64_t			blocks;
	uint64_t			i;
	uint64_t			blocks_with_extra;
	uint64_t			total_blocks = 0;
	unsigned int			avg_per_block;
	unsigned int			level = 0;
	int				ret;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	INIT_LIST_HEAD(&buffers_list);
	cur->bc_nlevels = bbl->btree_height;
	xfs_btree_set_ptr_null(cur, &child_ptr);
	xfs_btree_set_ptr_null(cur, &ptr);

	xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
			&avg_per_block, &blocks, &blocks_with_extra);

	/* Load each leaf block. */
	for (i = 0; i < blocks; i++) {
		unsigned int		nr_this_block = avg_per_block;

		/*
		 * Due to rounding, btree blocks will not be evenly populated
		 * in most cases.  blocks_with_extra tells us how many blocks
		 * will receive an extra record to distribute the excess across
		 * the current level as evenly as possible.
		 */
		if (i < blocks_with_extra)
			nr_this_block++;

		ret = xfs_btree_bload_prep_block(cur, bbl, &buffers_list, level,
				nr_this_block, &ptr, &bp, &block, priv);
		if (ret)
			goto out;

		trace_xfs_btree_bload_block(cur, level, i, blocks, &ptr,
				nr_this_block);

		ret = xfs_btree_bload_leaf(cur, nr_this_block, bbl->get_record,
				block, priv);
		if (ret)
			goto out;

		/*
		 * Record the leftmost leaf pointer so we know where to start
		 * with the first node level.
		 */
		if (i == 0)
			xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
	}
	total_blocks += blocks;
	xfs_btree_bload_drop_buf(&buffers_list, &bp);

	/* Populate the internal btree nodes. */
	for (level = 1; level < cur->bc_nlevels; level++) {
		union xfs_btree_ptr	first_ptr;

		nr_this_level = blocks;
		block = NULL;
		xfs_btree_set_ptr_null(cur, &ptr);

		xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
				&avg_per_block, &blocks, &blocks_with_extra);

		/* Load each node block. */
		for (i = 0; i < blocks; i++) {
			unsigned int	nr_this_block = avg_per_block;

			if (i < blocks_with_extra)
				nr_this_block++;

			ret = xfs_btree_bload_prep_block(cur, bbl,
					&buffers_list, level, nr_this_block,
					&ptr, &bp, &block, priv);
			if (ret)
				goto out;

			trace_xfs_btree_bload_block(cur, level, i, blocks,
					&ptr, nr_this_block);

			ret = xfs_btree_bload_node(cur, nr_this_block,
					&child_ptr, block);
			if (ret)
				goto out;

			/*
			 * Record the leftmost node pointer so that we know
			 * where to start the next node level above this one.
			 */
			if (i == 0)
				xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
		}
		total_blocks += blocks;
		xfs_btree_bload_drop_buf(&buffers_list, &bp);
		xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
	}

	/* Initialize the new root. */
	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
		cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
		cur->bc_ino.ifake->if_blocks = total_blocks - 1;
	} else {
		cur->bc_ag.afake->af_root = be32_to_cpu(ptr.s);
		cur->bc_ag.afake->af_levels = cur->bc_nlevels;
		cur->bc_ag.afake->af_blocks = total_blocks;
	}

	/*
	 * Write the new blocks to disk.  If the ordered list isn't empty after
	 * that, then something went wrong and we have to fail.  This should
	 * never happen, but we'll check anyway.
	 */
	ret = xfs_buf_delwri_submit(&buffers_list);
	if (ret)
		goto out;
	if (!list_empty(&buffers_list)) {
		ASSERT(list_empty(&buffers_list));
		ret = -EIO;
	}

out:
	xfs_buf_delwri_cancel(&buffers_list);
	if (bp)
		xfs_buf_relse(bp);
	return ret;
}
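
/*
 * Rough usage sketch (illustrative only, not from this patch; error
 * handling and the block reservation mechanics are elided, and the
 * "example_" name is hypothetical): a caller sizes the new tree first,
 * reserves that many blocks for claim_block, and only then loads.
 */
static int
example_bulk_load(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_bload	*bbl,
	uint64_t		nr_records,
	void			*priv)
{
	int			error;

	bbl->leaf_slack = -1;	/* default to ~75% full blocks */
	bbl->node_slack = -1;

	error = xfs_btree_bload_compute_geometry(cur, bbl, nr_records);
	if (error)
		return error;

	/* ... reserve bbl->nr_blocks blocks for claim_block here ... */

	return xfs_btree_bload(cur, bbl, priv);
}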

@@ -0,0 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2020 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#ifndef __XFS_BTREE_STAGING_H__
#define __XFS_BTREE_STAGING_H__

/* Fake root for an AG-rooted btree. */
struct xbtree_afakeroot {
	/* AG block number of the new btree root. */
	xfs_agblock_t		af_root;

	/* Height of the new btree. */
	unsigned int		af_levels;

	/* Number of blocks used by the btree. */
	unsigned int		af_blocks;
};

/* Cursor interactions with fake roots for AG-rooted btrees. */
void xfs_btree_stage_afakeroot(struct xfs_btree_cur *cur,
		struct xbtree_afakeroot *afake);
void xfs_btree_commit_afakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
		struct xfs_buf *agbp, const struct xfs_btree_ops *ops);

/* Fake root for an inode-rooted btree. */
struct xbtree_ifakeroot {
	/* Fake inode fork. */
	struct xfs_ifork	*if_fork;

	/* Number of blocks used by the btree. */
	int64_t			if_blocks;

	/* Height of the new btree. */
	unsigned int		if_levels;

	/* Number of bytes available for this fork in the inode. */
	unsigned int		if_fork_size;

	/* Fork format. */
	unsigned int		if_format;

	/* Number of records. */
	unsigned int		if_extents;
};

/* Cursor interactions with fake roots for inode-rooted btrees. */
void xfs_btree_stage_ifakeroot(struct xfs_btree_cur *cur,
		struct xbtree_ifakeroot *ifake,
		struct xfs_btree_ops **new_ops);
void xfs_btree_commit_ifakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
		int whichfork, const struct xfs_btree_ops *ops);

/* Bulk loading of staged btrees. */
typedef int (*xfs_btree_bload_get_record_fn)(struct xfs_btree_cur *cur, void *priv);
typedef int (*xfs_btree_bload_claim_block_fn)(struct xfs_btree_cur *cur,
		union xfs_btree_ptr *ptr, void *priv);
typedef size_t (*xfs_btree_bload_iroot_size_fn)(struct xfs_btree_cur *cur,
		unsigned int nr_this_level, void *priv);

struct xfs_btree_bload {
	/*
	 * This function will be called nr_records times to load records into
	 * the btree.  The function does this by setting the cursor's bc_rec
	 * field in in-core format.  Records must be returned in sort order.
	 */
	xfs_btree_bload_get_record_fn	get_record;

	/*
	 * This function will be called nr_blocks times to obtain a pointer
	 * to a new btree block on disk.  Callers must preallocate all space
	 * for the new btree before calling xfs_btree_bload, and this function
	 * is what claims that reservation.
	 */
	xfs_btree_bload_claim_block_fn	claim_block;

	/*
	 * This function should return the size of the in-core btree root
	 * block.  It is only necessary for XFS_BTREE_ROOT_IN_INODE btree
	 * types.
	 */
	xfs_btree_bload_iroot_size_fn	iroot_size;

	/*
	 * The caller should set this to the number of records that will be
	 * stored in the new btree.
	 */
	uint64_t			nr_records;

	/*
	 * Number of free records to leave in each leaf block.  If the caller
	 * sets this to -1, the slack value will be calculated to be halfway
	 * between maxrecs and minrecs.  This typically leaves the block 75%
	 * full.  Note that slack values are not enforced on inode root blocks.
	 */
	int				leaf_slack;

	/*
	 * Number of free key/ptrs pairs to leave in each node block.  This
	 * field has the same semantics as leaf_slack.
	 */
	int				node_slack;

	/*
	 * The xfs_btree_bload_compute_geometry function will set this to the
	 * number of btree blocks needed to store nr_records records.
	 */
	uint64_t			nr_blocks;

	/*
	 * The xfs_btree_bload_compute_geometry function will set this to the
	 * height of the new btree.
	 */
	unsigned int			btree_height;
};

int xfs_btree_bload_compute_geometry(struct xfs_btree_cur *cur,
		struct xfs_btree_bload *bbl, uint64_t nr_records);
int xfs_btree_bload(struct xfs_btree_cur *cur, struct xfs_btree_bload *bbl,
		void *priv);

#endif	/* __XFS_BTREE_STAGING_H__ */
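
/*
 * Sketch of the two mandatory callbacks (illustrative only; the
 * "example_" names, record type, and helper are hypothetical).  A real
 * caller would pull records from its own staging data and hand out
 * blocks from a reservation it made before calling xfs_btree_bload.
 */
struct example_load_data {
	struct example_rec	*recs;		/* sorted records */
	uint64_t		next_rec;	/* next record to emit */
	xfs_fsblock_t		*free_blocks;	/* preallocated blocks */
	uint64_t		next_block;
};

static int
example_get_record(struct xfs_btree_cur *cur, void *priv)
{
	struct example_load_data	*ld = priv;

	/* Copy the next sorted record into the cursor in incore form. */
	example_rec_to_cur(&ld->recs[ld->next_rec++], cur);
	return 0;
}

static int
example_claim_block(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr,
		void *priv)
{
	struct example_load_data	*ld = priv;

	/* Hand out one block from the space reserved before loading. */
	ptr->l = cpu_to_be64(ld->free_blocks[ld->next_block++]);
	return 0;
}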

@@ -590,7 +590,7 @@ xfs_da3_split(
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.forw) {
 		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
-			xfs_buf_corruption_error(oldblk->bp);
+			xfs_buf_mark_corrupt(oldblk->bp);
 			error = -EFSCORRUPTED;
 			goto out;
 		}
@@ -603,7 +603,7 @@ xfs_da3_split(
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.back) {
 		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
-			xfs_buf_corruption_error(oldblk->bp);
+			xfs_buf_mark_corrupt(oldblk->bp);
 			error = -EFSCORRUPTED;
 			goto out;
 		}
@@ -1624,7 +1624,7 @@ xfs_da3_node_lookup_int(
 		}

 		if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
-			xfs_buf_corruption_error(blk->bp);
+			xfs_buf_mark_corrupt(blk->bp);
 			return -EFSCORRUPTED;
 		}

@@ -1639,7 +1639,7 @@ xfs_da3_node_lookup_int(

 		/* Tree taller than we can handle; bail out! */
 		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
-			xfs_buf_corruption_error(blk->bp);
+			xfs_buf_mark_corrupt(blk->bp);
 			return -EFSCORRUPTED;
 		}

@@ -1647,7 +1647,7 @@ xfs_da3_node_lookup_int(
 		if (blkno == args->geo->leafblk)
 			expected_level = nodehdr.level - 1;
 		else if (expected_level != nodehdr.level) {
-			xfs_buf_corruption_error(blk->bp);
+			xfs_buf_mark_corrupt(blk->bp);
 			return -EFSCORRUPTED;
 		} else
 			expected_level--;
@@ -1986,7 +1986,8 @@ xfs_da3_path_shift(
 	ASSERT(path != NULL);
 	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
 	level = (path->active-1) - 1;	/* skip bottom layer in path */
-	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
+	for (; level >= 0; level--) {
+		blk = &path->blk[level];
 		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
 					   blk->bp->b_addr);

@@ -2520,8 +2521,10 @@ xfs_dabuf_map(
 	 */
 	if (nirecs > 1) {
 		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
-		if (!map)
+		if (!map) {
+			error = -ENOMEM;
 			goto out_free_irecs;
+		}
 		*mapp = map;
 	}

@@ -57,9 +57,10 @@ typedef struct xfs_da_args {
 	const uint8_t	*name;		/* string (maybe not NULL terminated) */
 	int		namelen;	/* length of string (maybe no NULL) */
 	uint8_t		filetype;	/* filetype of inode for directories */
-	uint8_t		*value;		/* set of bytes (maybe contain NULLs) */
+	void		*value;		/* set of bytes (maybe contain NULLs) */
 	int		valuelen;	/* length of value */
-	int		flags;		/* argument flags (eg: ATTR_NOCREATE) */
+	unsigned int	attr_filter;	/* XFS_ATTR_{ROOT,SECURE,INCOMPLETE} */
+	unsigned int	attr_flags;	/* XATTR_{CREATE,REPLACE} */
 	xfs_dahash_t	hashval;	/* hash value of name */
 	xfs_ino_t	inumber;	/* input/output inode number */
 	struct xfs_inode *dp;		/* directory inode to manipulate */
@@ -88,8 +89,7 @@ typedef struct xfs_da_args {
 #define XFS_DA_OP_ADDNAME	0x0004	/* this is an add operation */
 #define XFS_DA_OP_OKNOENT	0x0008	/* lookup/add op, ENOENT ok, else die */
 #define XFS_DA_OP_CILOOKUP	0x0010	/* lookup to return CI name if found */
-#define XFS_DA_OP_ALLOCVAL	0x0020	/* lookup to alloc buffer if found */
-#define XFS_DA_OP_INCOMPLETE	0x0040	/* lookup INCOMPLETE attr keys */
+#define XFS_DA_OP_NOTIME	0x0020	/* don't update inode timestamps */

 #define XFS_DA_OP_FLAGS \
 	{ XFS_DA_OP_JUSTCHECK,	"JUSTCHECK" }, \
@@ -97,8 +97,7 @@ typedef struct xfs_da_args {
 	{ XFS_DA_OP_ADDNAME,	"ADDNAME" }, \
 	{ XFS_DA_OP_OKNOENT,	"OKNOENT" }, \
 	{ XFS_DA_OP_CILOOKUP,	"CILOOKUP" }, \
-	{ XFS_DA_OP_ALLOCVAL,	"ALLOCVAL" }, \
-	{ XFS_DA_OP_INCOMPLETE,	"INCOMPLETE" }
+	{ XFS_DA_OP_NOTIME,	"NOTIME" }

 /*
  * Storage for holding state during Btree searches and split/join ops.

@@ -692,19 +692,7 @@ struct xfs_attr3_leafblock {
 #define XFS_ATTR_ROOT		(1 << XFS_ATTR_ROOT_BIT)
 #define XFS_ATTR_SECURE		(1 << XFS_ATTR_SECURE_BIT)
 #define XFS_ATTR_INCOMPLETE	(1 << XFS_ATTR_INCOMPLETE_BIT)
 
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK		(ATTR_ROOT | ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK_MASK	(XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags)	((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags)	((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x)	(((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
-					 ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x)	(((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
-					 ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
 
 /*
  * Alignment for namelist and valuelist entries (since they are mixed

@@ -114,6 +114,23 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
 	.verify_struct = xfs_dir3_block_verify,
 };
 
+static xfs_failaddr_t
+xfs_dir3_block_header_check(
+	struct xfs_inode	*dp,
+	struct xfs_buf		*bp)
+{
+	struct xfs_mount	*mp = dp->i_mount;
+
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		struct xfs_dir3_blk_hdr	*hdr3 = bp->b_addr;
+
+		if (be64_to_cpu(hdr3->owner) != dp->i_ino)
+			return __this_address;
+	}
+
+	return NULL;
+}
+
 int
 xfs_dir3_block_read(
 	struct xfs_trans	*tp,
@@ -121,12 +138,24 @@ xfs_dir3_block_read(
 	struct xfs_buf		**bpp)
 {
 	struct xfs_mount	*mp = dp->i_mount;
+	xfs_failaddr_t		fa;
 	int			err;
 
 	err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, 0, bpp,
 				XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
-	if (!err && tp && *bpp)
-		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
+	if (err || !*bpp)
+		return err;
+
+	/* Check things that we can't do in the verifier. */
+	fa = xfs_dir3_block_header_check(dp, *bpp);
+	if (fa) {
+		__xfs_buf_mark_corrupt(*bpp, fa);
+		xfs_trans_brelse(tp, *bpp);
+		*bpp = NULL;
+		return -EFSCORRUPTED;
+	}
+
+	xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
 	return err;
 }
 
@@ -394,6 +394,22 @@ static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
 	.verify_write = xfs_dir3_data_write_verify,
 };
 
+static xfs_failaddr_t
+xfs_dir3_data_header_check(
+	struct xfs_inode	*dp,
+	struct xfs_buf		*bp)
+{
+	struct xfs_mount	*mp = dp->i_mount;
+
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		struct xfs_dir3_data_hdr *hdr3 = bp->b_addr;
+
+		if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
+			return __this_address;
+	}
+
+	return NULL;
+}
+
 int
 xfs_dir3_data_read(
@@ -403,12 +419,24 @@ xfs_dir3_data_read(
 	unsigned int		flags,
 	struct xfs_buf		**bpp)
 {
+	xfs_failaddr_t		fa;
 	int			err;
 
 	err = xfs_da_read_buf(tp, dp, bno, flags, bpp, XFS_DATA_FORK,
 			&xfs_dir3_data_buf_ops);
-	if (!err && tp && *bpp)
-		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
+	if (err || !*bpp)
+		return err;
+
+	/* Check things that we can't do in the verifier. */
+	fa = xfs_dir3_data_header_check(dp, *bpp);
+	if (fa) {
+		__xfs_buf_mark_corrupt(*bpp, fa);
+		xfs_trans_brelse(tp, *bpp);
+		*bpp = NULL;
+		return -EFSCORRUPTED;
+	}
+
+	xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
 	return err;
 }
 
@@ -1383,7 +1383,7 @@ xfs_dir2_leaf_removename(
 	ltp = xfs_dir2_leaf_tail_p(geo, leaf);
 	bestsp = xfs_dir2_leaf_bests_p(ltp);
 	if (be16_to_cpu(bestsp[db]) != oldbest) {
-		xfs_buf_corruption_error(lbp);
+		xfs_buf_mark_corrupt(lbp);
 		return -EFSCORRUPTED;
 	}
 	/*

@@ -194,6 +194,8 @@ xfs_dir3_free_header_check(
 			return __this_address;
 		if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
 			return __this_address;
+		if (be64_to_cpu(hdr3->hdr.owner) != dp->i_ino)
+			return __this_address;
 	} else {
 		struct xfs_dir2_free_hdr *hdr = bp->b_addr;

@@ -226,8 +228,9 @@ __xfs_dir3_free_read(
 	/* Check things that we can't do in the verifier. */
 	fa = xfs_dir3_free_header_check(dp, fbno, *bpp);
 	if (fa) {
-		xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
+		__xfs_buf_mark_corrupt(*bpp, fa);
 		xfs_trans_brelse(tp, *bpp);
 		*bpp = NULL;
 		return -EFSCORRUPTED;
 	}

@@ -439,7 +442,7 @@ xfs_dir2_leaf_to_node(
 	ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
 	if (be32_to_cpu(ltp->bestcount) >
 				(uint)dp->i_d.di_size / args->geo->blksize) {
-		xfs_buf_corruption_error(lbp);
+		xfs_buf_mark_corrupt(lbp);
 		return -EFSCORRUPTED;
 	}

@@ -513,7 +516,7 @@ xfs_dir2_leafn_add(
 	 * into other peoples memory
 	 */
 	if (index < 0) {
-		xfs_buf_corruption_error(bp);
+		xfs_buf_mark_corrupt(bp);
 		return -EFSCORRUPTED;
 	}

@@ -800,7 +803,7 @@ xfs_dir2_leafn_lookup_for_entry(

 	xfs_dir3_leaf_check(dp, bp);
 	if (leafhdr.count <= 0) {
-		xfs_buf_corruption_error(bp);
+		xfs_buf_mark_corrupt(bp);
 		return -EFSCORRUPTED;
 	}

@@ -497,6 +497,23 @@ static inline bool xfs_sb_version_hascrc(struct xfs_sb *sbp)
 	return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
 }
 
+/*
+ * v5 file systems support V3 inodes only, earlier file systems support
+ * v2 and v1 inodes.
+ */
+static inline bool xfs_sb_version_has_v3inode(struct xfs_sb *sbp)
+{
+	return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
+}
+
+static inline bool xfs_dinode_good_version(struct xfs_sb *sbp,
+		uint8_t version)
+{
+	if (xfs_sb_version_has_v3inode(sbp))
+		return version == 3;
+	return version == 1 || version == 2;
+}
+
 static inline bool xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
 {
 	return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
@@ -560,7 +577,6 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
 
 #define	XFS_SB_DADDR		((xfs_daddr_t)0) /* daddr in filesystem/ag */
 #define	XFS_SB_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
-#define	XFS_BUF_TO_SBP(bp)	((xfs_dsb_t *)((bp)->b_addr))
 
 #define	XFS_HDR_BLOCK(mp,d)	((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
 #define	XFS_DADDR_TO_FSB(mp,d)	XFS_AGB_TO_FSB(mp, \
@@ -707,7 +723,6 @@ typedef struct xfs_agf {
 /* disk block (xfs_daddr_t) in the AG */
 #define XFS_AGF_DADDR(mp)	((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
 #define	XFS_AGF_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
-#define	XFS_BUF_TO_AGF(bp)	((xfs_agf_t *)((bp)->b_addr))
 
 /*
  * Size of the unlinked inode hash table in the agi.
@@ -775,7 +790,6 @@ typedef struct xfs_agi {
 /* disk block (xfs_daddr_t) in the AG */
 #define XFS_AGI_DADDR(mp)	((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
 #define	XFS_AGI_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
-#define	XFS_BUF_TO_AGI(bp)	((xfs_agi_t *)((bp)->b_addr))
 
 /*
  * The third a.g. block contains the a.g. freelist, an array
@@ -783,21 +797,15 @@ typedef struct xfs_agi {
  */
 #define XFS_AGFL_DADDR(mp)	((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
 #define	XFS_AGFL_BLOCK(mp)	XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
-#define	XFS_BUF_TO_AGFL(bp)	((xfs_agfl_t *)((bp)->b_addr))
+#define	XFS_BUF_TO_AGFL(bp)	((struct xfs_agfl *)((bp)->b_addr))
 
 #define XFS_BUF_TO_AGFL_BNO(mp, bp) \
 	(xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
 		&(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
 		(__be32 *)(bp)->b_addr)
 
-typedef struct xfs_agfl {
+struct xfs_agfl {
 	__be32		agfl_magicnum;
 	__be32		agfl_seqno;
 	uuid_t		agfl_uuid;
 	__be64		agfl_lsn;
 	__be32		agfl_crc;
-	__be32		agfl_bno[];	/* actually xfs_agfl_size(mp) */
-} __attribute__((packed)) xfs_agfl_t;
+} __attribute__((packed));
 
 #define XFS_AGFL_CRC_OFF	offsetof(struct xfs_agfl, agfl_crc)
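
/*
 * Illustrative sketch (added for this writeup, not part of the patch):
 * with the flexible array gone from struct xfs_agfl, the number of AGFL
 * slots is derived from the sector size rather than from the struct
 * definition.  Something along these lines mirrors what the existing
 * xfs_agfl_size() helper computes:
 */
static inline unsigned int
example_agfl_slots(struct xfs_mount *mp)
{
	unsigned int	size = mp->m_sb.sb_sectsize;

	/* v5 filesystems spend the front of the sector on the header. */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}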
@@ -946,8 +954,12 @@ enum xfs_dinode_fmt {
 /*
  * Inode size for given fs.
  */
-#define XFS_LITINO(mp, version) \
-	((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
+#define XFS_DINODE_SIZE(sbp) \
+	(xfs_sb_version_has_v3inode(sbp) ? \
+		sizeof(struct xfs_dinode) : \
+		offsetof(struct xfs_dinode, di_crc))
+#define XFS_LITINO(mp) \
+	((mp)->m_sb.sb_inodesize - XFS_DINODE_SIZE(&(mp)->m_sb))
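
/*
 * Worked example (added for illustration; treat the exact byte counts
 * as assumptions about the disk format): with 512-byte inodes on a v5
 * filesystem, XFS_DINODE_SIZE is sizeof(struct xfs_dinode) = 176, so
 * XFS_LITINO yields 512 - 176 = 336 bytes of literal area for the data
 * and attr forks.  On a v4 filesystem the core stops at di_crc
 * (100 bytes), leaving 412.
 */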

 /*
  * Inode data & attribute fork sizes, per inode.
@@ -956,13 +968,9 @@ enum xfs_dinode_fmt {
 #define XFS_DFORK_BOFF(dip)		((int)((dip)->di_forkoff << 3))
 
 #define XFS_DFORK_DSIZE(dip,mp) \
-	(XFS_DFORK_Q(dip) ? \
-		XFS_DFORK_BOFF(dip) : \
-		XFS_LITINO(mp, (dip)->di_version))
+	(XFS_DFORK_Q(dip) ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp))
 #define XFS_DFORK_ASIZE(dip,mp) \
-	(XFS_DFORK_Q(dip) ? \
-		XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
-		0)
+	(XFS_DFORK_Q(dip) ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0)
 #define XFS_DFORK_SIZE(dip,mp,w) \
 	((w) == XFS_DATA_FORK ? \
 		XFS_DFORK_DSIZE(dip, mp) : \
@@ -568,10 +568,40 @@ typedef struct xfs_fsop_setdm_handlereq {
 	struct fsdmidata	__user *data;	/* DMAPI data	*/
 } xfs_fsop_setdm_handlereq_t;
 
+/*
+ * Flags passed in xfs_attr_multiop.am_flags for the attr ioctl interface.
+ *
+ * NOTE: Must match the values declared in libattr without the XFS_IOC_ prefix.
+ */
+#define XFS_IOC_ATTR_ROOT	0x0002	/* use attrs in root namespace */
+#define XFS_IOC_ATTR_SECURE	0x0008	/* use attrs in security namespace */
+#define XFS_IOC_ATTR_CREATE	0x0010	/* fail if attr already exists */
+#define XFS_IOC_ATTR_REPLACE	0x0020	/* fail if attr does not exist */
+
 typedef struct xfs_attrlist_cursor {
 	__u32		opaque[4];
 } xfs_attrlist_cursor_t;
 
+/*
+ * Define how lists of attribute names are returned to userspace from the
+ * XFS_IOC_ATTRLIST_BY_HANDLE ioctl.  struct xfs_attrlist is the header at the
+ * beginning of the returned buffer, and each entry in al_offset contains the
+ * relative offset of an xfs_attrlist_ent containing the actual entry.
+ *
+ * NOTE: struct xfs_attrlist must match struct attrlist defined in libattr, and
+ * struct xfs_attrlist_ent must match struct attrlist_ent defined in libattr.
+ */
+struct xfs_attrlist {
+	__s32	al_count;	/* number of entries in attrlist */
+	__s32	al_more;	/* T/F: more attrs (do call again) */
+	__s32	al_offset[1];	/* byte offsets of attrs [var-sized] */
+};
+
+struct xfs_attrlist_ent {	/* data from attr_list() */
+	__u32	a_valuelen;	/* number bytes in value of attr */
+	char	a_name[1];	/* attr name (NULL terminated) */
+};
+
 typedef struct xfs_fsop_attrlist_handlereq {
 	struct xfs_fsop_handlereq	hreq; /* handle interface structure */
 	struct xfs_attrlist_cursor	pos; /* opaque cookie, list offset */
@@ -589,7 +619,7 @@ typedef struct xfs_attr_multiop {
 	void	__user *am_attrname;
 	void	__user *am_attrvalue;
 	__u32	am_length;
-	__u32	am_flags;
+	__u32	am_flags; /* XFS_IOC_ATTR_* */
 } xfs_attr_multiop_t;
 
 typedef struct xfs_fsop_attrmulti_handlereq {

@@ -105,7 +105,7 @@ xfs_inobt_get_rec(
 	int			*stat)
 {
 	struct xfs_mount	*mp = cur->bc_mp;
-	xfs_agnumber_t		agno = cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = cur->bc_ag.agno;
 	union xfs_btree_rec	*rec;
 	int			error;
 	uint64_t		realfree;
@@ -177,7 +177,7 @@ xfs_inobt_insert(
 	xfs_btnum_t		btnum)
 {
 	struct xfs_btree_cur	*cur;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_agi		*agi = agbp->b_addr;
 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 	xfs_agino_t		thisino;
 	int			i;
@@ -304,7 +304,7 @@ xfs_ialloc_inode_init(
 	 * That means for v3 inode we log the entire buffer rather than just the
 	 * inode cores.
 	 */
-	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 		version = 3;
 		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

@@ -339,7 +339,7 @@ xfs_ialloc_inode_init(
 	xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
 	for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
 		int	ioffset = i << mp->m_sb.sb_inodelog;
-		uint	isize = xfs_dinode_size(version);
+		uint	isize = XFS_DINODE_SIZE(&mp->m_sb);

 		free = xfs_make_iptr(mp, fbuf, i);
 		free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
@@ -525,7 +525,7 @@ xfs_inobt_insert_sprec(
 	bool			merge)	/* merge or replace */
 {
 	struct xfs_btree_cur	*cur;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_agi		*agi = agbp->b_addr;
 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 	int			error;
 	int			i;
@@ -658,7 +658,7 @@ xfs_ialloc_ag_alloc(
 	 * chunk of inodes.  If the filesystem is striped, this will fill
 	 * an entire stripe unit with inodes.
 	 */
-	agi = XFS_BUF_TO_AGI(agbp);
+	agi = agbp->b_addr;
 	newino = be32_to_cpu(agi->agi_newino);
 	agno = be32_to_cpu(agi->agi_seqno);
 	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
@@ -1130,7 +1130,7 @@ xfs_dialloc_ag_inobt(
 	xfs_ino_t		*inop)
 {
 	struct xfs_mount	*mp = tp->t_mountp;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_agi		*agi = agbp->b_addr;
 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
 	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
@@ -1583,7 +1583,7 @@ xfs_dialloc_ag(
 	xfs_ino_t		*inop)
 {
 	struct xfs_mount	*mp = tp->t_mountp;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_agi		*agi = agbp->b_addr;
 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
 	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
@@ -1943,7 +1943,7 @@ xfs_difree_inobt(
 	struct xfs_icluster	*xic,
 	struct xfs_inobt_rec_incore *orec)
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_agi		*agi = agbp->b_addr;
 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 	struct xfs_perag	*pag;
 	struct xfs_btree_cur	*cur;
@@ -2079,7 +2079,7 @@ xfs_difree_finobt(
 	xfs_agino_t		agino,
 	struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_agi		*agi = agbp->b_addr;
 	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
 	struct xfs_btree_cur	*cur;
 	struct xfs_inobt_rec_incore rec;
@@ -2489,9 +2489,8 @@ xfs_ialloc_log_agi(
 		sizeof(xfs_agi_t)
 	};
 #ifdef DEBUG
-	xfs_agi_t		*agi;	/* allocation group header */
+	struct xfs_agi		*agi = bp->b_addr;

-	agi = XFS_BUF_TO_AGI(bp);
 	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
 #endif

@@ -2523,14 +2522,13 @@ xfs_agi_verify(
 	struct xfs_buf	*bp)
 {
 	struct xfs_mount *mp = bp->b_mount;
-	struct xfs_agi	*agi = XFS_BUF_TO_AGI(bp);
+	struct xfs_agi	*agi = bp->b_addr;
 	int		i;

 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
 		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
 			return __this_address;
-		if (!xfs_log_check_lsn(mp,
-				be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
+		if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
 			return __this_address;
 	}

@@ -2593,6 +2591,7 @@ xfs_agi_write_verify(
 {
 	struct xfs_mount	*mp = bp->b_mount;
 	struct xfs_buf_log_item	*bip = bp->b_log_item;
+	struct xfs_agi		*agi = bp->b_addr;
 	xfs_failaddr_t		fa;

 	fa = xfs_agi_verify(bp);
@@ -2605,7 +2604,7 @@ xfs_agi_write_verify(
 		return;

 	if (bip)
-		XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+		agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
 	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
 }

@@ -2661,7 +2660,7 @@ xfs_ialloc_read_agi(
 	if (error)
 		return error;

-	agi = XFS_BUF_TO_AGI(*bpp);
+	agi = (*bpp)->b_addr;
 	pag = xfs_perag_get(mp, agno);
 	if (!pag->pagi_init) {
 		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
@@ -2873,7 +2872,7 @@ xfs_ialloc_setup_geometry(
 	 * cannot change the behavior.
 	 */
 	igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
-	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 		int	new_size = igeo->inode_cluster_size_raw;

 		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;

@@ -12,6 +12,7 @@
 #include "xfs_bit.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_ialloc.h"
 #include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
@@ -20,7 +21,6 @@
 #include "xfs_trans.h"
 #include "xfs_rmap.h"

-
 STATIC int
 xfs_inobt_get_minrecs(
 	struct xfs_btree_cur	*cur,
@@ -34,7 +34,7 @@ xfs_inobt_dup_cursor(
 	struct xfs_btree_cur	*cur)
 {
 	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agbp, cur->bc_private.a.agno,
+			cur->bc_ag.agbp, cur->bc_ag.agno,
 			cur->bc_btnum);
 }
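
/*
 * For orientation (paraphrased sketch, not the verbatim 5.7 definition):
 * the cursor's per-tree-type private data now lives in an anonymous
 * union, so AG-rooted and inode-rooted btrees name their state directly:
 *
 *	union {
 *		struct {			// AG-rooted btrees
 *			struct xfs_buf		*agbp;
 *			struct xbtree_afakeroot	*afake;
 *			xfs_agnumber_t		agno;
 *		} bc_ag;
 *		struct {			// inode-rooted btrees
 *			struct xfs_inode	*ip;
 *			struct xbtree_ifakeroot	*ifake;
 *			...
 *		} bc_ino;
 *	};
 *
 * which is why cur->bc_private.a.agno becomes cur->bc_ag.agno below.
 */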

@@ -44,8 +44,8 @@ xfs_inobt_set_root(
 	union xfs_btree_ptr	*nptr,
 	int			inc)	/* level change */
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agi		*agi = agbp->b_addr;

 	agi->agi_root = nptr->s;
 	be32_add_cpu(&agi->agi_level, inc);
@@ -58,8 +58,8 @@ xfs_finobt_set_root(
 	union xfs_btree_ptr	*nptr,
 	int			inc)	/* level change */
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agi		*agi = agbp->b_addr;

 	agi->agi_free_root = nptr->s;
 	be32_add_cpu(&agi->agi_free_level, inc);
@@ -83,7 +83,7 @@ __xfs_inobt_alloc_block(
 	args.tp = cur->bc_tp;
 	args.mp = cur->bc_mp;
 	args.oinfo = XFS_RMAP_OINFO_INOBT;
-	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
+	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
 	args.minlen = 1;
 	args.maxlen = 1;
 	args.prod = 1;
@@ -212,9 +212,9 @@ xfs_inobt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
+	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));

 	ptr->s = agi->agi_root;
 }
@@ -224,9 +224,9 @@ xfs_finobt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp);
+	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
 	ptr->s = agi->agi_free_root;
 }

@@ -400,32 +400,27 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
 };

 /*
- * Allocate a new inode btree cursor.
+ * Initialize a new inode btree cursor.
  */
-struct xfs_btree_cur *			/* new inode btree cursor */
-xfs_inobt_init_cursor(
+static struct xfs_btree_cur *
+xfs_inobt_init_common(
 	struct xfs_mount	*mp,		/* file system mount point */
 	struct xfs_trans	*tp,		/* transaction pointer */
-	struct xfs_buf		*agbp,		/* buffer for agi structure */
 	xfs_agnumber_t		agno,		/* allocation group number */
 	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
 	struct xfs_btree_cur	*cur;

 	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
-
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
 	cur->bc_btnum = btnum;
 	if (btnum == XFS_BTNUM_INO) {
-		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
-		cur->bc_ops = &xfs_inobt_ops;
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
+		cur->bc_ops = &xfs_inobt_ops;
 	} else {
-		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
-		cur->bc_ops = &xfs_finobt_ops;
 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
+		cur->bc_ops = &xfs_finobt_ops;
 	}

 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
@@ -433,12 +428,75 @@ xfs_inobt_init_cursor(
 	if (xfs_sb_version_hascrc(&mp->m_sb))
 		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

-	cur->bc_private.a.agbp = agbp;
-	cur->bc_private.a.agno = agno;
-
+	cur->bc_ag.agno = agno;
 	return cur;
 }

+/* Create an inode btree cursor. */
+struct xfs_btree_cur *
+xfs_inobt_init_cursor(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum)
+{
+	struct xfs_btree_cur	*cur;
+	struct xfs_agi		*agi = agbp->b_addr;
+
+	cur = xfs_inobt_init_common(mp, tp, agno, btnum);
+	if (btnum == XFS_BTNUM_INO)
+		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
+	else
+		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
+	cur->bc_ag.agbp = agbp;
+	return cur;
+}
+
+/* Create an inode btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_inobt_stage_cursor(
+	struct xfs_mount	*mp,
+	struct xbtree_afakeroot	*afake,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Install a new inobt btree root.  Caller is responsible for invalidating
+ * and freeing the old btree blocks.
+ */
+void
+xfs_inobt_commit_staged_btree(
+	struct xfs_btree_cur	*cur,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp)
+{
+	struct xfs_agi		*agi = agbp->b_addr;
+	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	if (cur->bc_btnum == XFS_BTNUM_INO) {
+		agi->agi_root = cpu_to_be32(afake->af_root);
+		agi->agi_level = cpu_to_be32(afake->af_levels);
+		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
+	} else {
+		agi->agi_free_root = cpu_to_be32(afake->af_root);
+		agi->agi_free_level = cpu_to_be32(afake->af_levels);
+		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREE_ROOT |
+				   XFS_AGI_FREE_LEVEL);
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
+	}
+}
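+
/*
 * Putting the pieces together (illustrative pseudo-flow added for this
 * writeup; locking, block reservation, and freeing of the old tree are
 * elided, and the "example_" name is hypothetical): an online repair of
 * the inobt would stage, load, and commit roughly like this.
 */
static int
example_rebuild_inobt(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_btree_bload	*bbl,
	void			*priv)
{
	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_cur	*cur;
	int			error;

	/* Work against a fake root so the live tree stays untouched. */
	cur = xfs_inobt_stage_cursor(mp, &afake, agno, XFS_BTNUM_INO);

	error = xfs_btree_bload_compute_geometry(cur, bbl, bbl->nr_records);
	if (!error)
		error = xfs_btree_bload(cur, bbl, priv);
	if (error) {
		xfs_btree_del_cursor(cur, error);
		return error;
	}

	/* Swap the new root into the AGI and convert the cursor. */
	xfs_inobt_commit_staged_btree(cur, tp, agbp);
	xfs_btree_del_cursor(cur, 0);
	return 0;
}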

/*
 * Calculate number of records in an inobt btree block.
 */

@@ -48,6 +48,9 @@ struct xfs_mount;
 extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
 		struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t,
 		xfs_btnum_t);
+struct xfs_btree_cur *xfs_inobt_stage_cursor(struct xfs_mount *mp,
+		struct xbtree_afakeroot *afake, xfs_agnumber_t agno,
+		xfs_btnum_t btnum);
 extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);

 /* ir_holemask to inode allocation bitmap conversion */
@@ -68,4 +71,7 @@ int xfs_inobt_cur(struct xfs_mount *mp, struct xfs_trans *tp,
 		xfs_agnumber_t agno, xfs_btnum_t btnum,
 		struct xfs_btree_cur **curpp, struct xfs_buf **agi_bpp);

+void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
+		struct xfs_trans *tp, struct xfs_buf *agbp);
+
 #endif	/* __XFS_IALLOC_BTREE_H__ */
@@ -44,17 +44,6 @@ xfs_inobp_check(
 }
 #endif

-bool
-xfs_dinode_good_version(
-	struct xfs_mount *mp,
-	__u8		version)
-{
-	if (xfs_sb_version_hascrc(&mp->m_sb))
-		return version == 3;
-
-	return version == 1 || version == 2;
-}
-
 /*
  * If we are doing readahead on an inode buffer, we might be in log recovery
  * reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -93,7 +82,7 @@ xfs_inode_buf_verify(
 		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
 		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
 		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
-			xfs_dinode_good_version(mp, dip->di_version) &&
+			xfs_dinode_good_version(&mp->m_sb, dip->di_version) &&
 			xfs_verify_agino_or_null(mp, agno, unlinked_ino);
 		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
 						XFS_ERRTAG_ITOBP_INOTOBP))) {
@@ -205,16 +194,14 @@ xfs_inode_from_disk(
 	struct xfs_icdinode	*to = &ip->i_d;
 	struct inode		*inode = VFS_I(ip);

-
 	/*
 	 * Convert v1 inodes immediately to v2 inode format as this is the
 	 * minimum inode version format we support in the rest of the code.
+	 * They will also be unconditionally written back to disk as v2 inodes.
 	 */
-	to->di_version = from->di_version;
-	if (to->di_version == 1) {
+	if (unlikely(from->di_version == 1)) {
 		set_nlink(inode, be16_to_cpu(from->di_onlink));
 		to->di_projid = 0;
-		to->di_version = 2;
 	} else {
 		set_nlink(inode, be32_to_cpu(from->di_nlink));
 		to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
@@ -222,8 +209,8 @@ xfs_inode_from_disk(
 	}

 	to->di_format = from->di_format;
-	to->di_uid = be32_to_cpu(from->di_uid);
-	to->di_gid = be32_to_cpu(from->di_gid);
+	i_uid_write(inode, be32_to_cpu(from->di_uid));
+	i_gid_write(inode, be32_to_cpu(from->di_gid));
 	to->di_flushiter = be16_to_cpu(from->di_flushiter);

 	/*
@@ -252,7 +239,7 @@ xfs_inode_from_disk(
 	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
 	to->di_flags	= be16_to_cpu(from->di_flags);

-	if (to->di_version == 3) {
+	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
 		inode_set_iversion_queried(inode,
 					   be64_to_cpu(from->di_changecount));
 		to->di_crtime.tv_sec = be32_to_cpu(from->di_crtime.t_sec);
@@ -274,10 +261,9 @@ xfs_inode_to_disk(
 	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
 	to->di_onlink = 0;

-	to->di_version = from->di_version;
 	to->di_format = from->di_format;
-	to->di_uid = cpu_to_be32(from->di_uid);
-	to->di_gid = cpu_to_be32(from->di_gid);
+	to->di_uid = cpu_to_be32(i_uid_read(inode));
+	to->di_gid = cpu_to_be32(i_gid_read(inode));
 	to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
 	to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);

@@ -303,7 +289,8 @@ xfs_inode_to_disk(
 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
 	to->di_flags = cpu_to_be16(from->di_flags);

-	if (from->di_version == 3) {
+	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+		to->di_version = 3;
 		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.tv_sec);
 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.tv_nsec);
@@ -315,6 +302,7 @@ xfs_inode_to_disk(
 		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
 		to->di_flushiter = 0;
 	} else {
+		to->di_version = 2;
 		to->di_flushiter = cpu_to_be16(from->di_flushiter);
 	}
 }
@@ -428,7 +416,7 @@ xfs_dinode_verify_forkoff(
 	case XFS_DINODE_FMT_LOCAL:	/* fall through ... */
 	case XFS_DINODE_FMT_EXTENTS:    /* fall through ... */
 	case XFS_DINODE_FMT_BTREE:
-		if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+		if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
 			return __this_address;
 		break;
 	default:
@@ -454,7 +442,7 @@ xfs_dinode_verify(

 	/* Verify v3 integrity information first */
 	if (dip->di_version >= 3) {
-		if (!xfs_sb_version_hascrc(&mp->m_sb))
+		if (!xfs_sb_version_has_v3inode(&mp->m_sb))
 			return __this_address;
 		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
 				XFS_DINODE_CRC_OFF))
@@ -629,10 +617,9 @@ xfs_iread(

 	/* shortcut IO on inode allocation if possible */
 	if ((iget_flags & XFS_IGET_CREATE) &&
-	    xfs_sb_version_hascrc(&mp->m_sb) &&
+	    xfs_sb_version_has_v3inode(&mp->m_sb) &&
 	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
 		VFS_I(ip)->i_generation = prandom_u32();
-		ip->i_d.di_version = 3;
 		return 0;
 	}

@@ -674,7 +661,6 @@ xfs_iread(
 	 * Partial initialisation of the in-core inode.  Just the bits
 	 * that xfs_ialloc won't overwrite or relies on being correct.
 	 */
-	ip->i_d.di_version = dip->di_version;
 	VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
 	ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

@@ -688,7 +674,6 @@ xfs_iread(
 		VFS_I(ip)->i_mode = 0;
 	}

-	ASSERT(ip->i_d.di_version >= 2);
 	ip->i_delayed_blks = 0;

 	/*

@@ -16,11 +16,8 @@ struct xfs_dinode;
  * format specific structures at the appropriate time.
  */
 struct xfs_icdinode {
-	int8_t		di_version;	/* inode version */
 	int8_t		di_format;	/* format of di_c data */
 	uint16_t	di_flushiter;	/* incremented on flush */
-	uint32_t	di_uid;		/* owner's user id */
-	uint32_t	di_gid;		/* owner's group id */
 	uint32_t	di_projid;	/* owner's project id */
 	xfs_fsize_t	di_size;	/* number of bytes in file */
 	xfs_rfsblock_t	di_nblocks;	/* # of direct & btree blocks used */
@@ -61,8 +58,6 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
 void	xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
 			       struct xfs_dinode *to);

-bool	xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
-
 #if defined(DEBUG)
 void	xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
 #else

@@ -183,7 +183,7 @@ xfs_iformat_local(
 	 */
 	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
 		xfs_warn(ip->i_mount,
-	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
+	"corrupt inode %Lu (bad size %d for local fork, size = %zd).",
 			(unsigned long long) ip->i_ino, size,
 			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
 		xfs_inode_verifier_error(ip, -EFSCORRUPTED,

@@ -46,14 +46,9 @@ struct xfs_ifork {
 				(ip)->i_afp : \
 				(ip)->i_cowfp))
 #define XFS_IFORK_DSIZE(ip) \
-	(XFS_IFORK_Q(ip) ? \
-		XFS_IFORK_BOFF(ip) : \
-		XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version))
+	(XFS_IFORK_Q(ip) ? XFS_IFORK_BOFF(ip) : XFS_LITINO((ip)->i_mount))
 #define XFS_IFORK_ASIZE(ip) \
-	(XFS_IFORK_Q(ip) ? \
-		XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \
-			XFS_IFORK_BOFF(ip) : \
-		0)
+	(XFS_IFORK_Q(ip) ? XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : 0)
 #define XFS_IFORK_SIZE(ip,w) \
 	((w) == XFS_DATA_FORK ? \
 		XFS_IFORK_DSIZE(ip) : \
@@ -424,12 +424,10 @@ struct xfs_log_dinode {
 	/* structure must be padded to 64 bit alignment */
 };
 
-static inline uint xfs_log_dinode_size(int version)
-{
-	if (version == 3)
-		return sizeof(struct xfs_log_dinode);
-	return offsetof(struct xfs_log_dinode, di_next_unlinked);
-}
+#define xfs_log_dinode_size(mp)						\
+	(xfs_sb_version_has_v3inode(&(mp)->m_sb) ?			\
+		sizeof(struct xfs_log_dinode) :				\
+		offsetof(struct xfs_log_dinode, di_next_unlinked))
 
 /*
  * Buffer Log Format definitions
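Note: with xfs_log_dinode_size() now keyed off the mount rather than the
removed per-inode di_version, a call site would change roughly like this
(hypothetical caller, shown only to illustrate the interface change):

    /* before: log item size chosen by the inode's own version field */
    len = xfs_log_dinode_size(ip->i_d.di_version);

    /* after: log item size chosen by the filesystem-wide inode format */
    len = xfs_log_dinode_size(ip->i_mount);
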
@@ -46,7 +46,7 @@ xfs_refcount_lookup_le(
 	xfs_agblock_t		bno,
 	int			*stat)
 {
-	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
 			XFS_LOOKUP_LE);
 	cur->bc_rec.rc.rc_startblock = bno;
 	cur->bc_rec.rc.rc_blockcount = 0;
@@ -63,7 +63,7 @@ xfs_refcount_lookup_ge(
 	xfs_agblock_t		bno,
 	int			*stat)
 {
-	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
 			XFS_LOOKUP_GE);
 	cur->bc_rec.rc.rc_startblock = bno;
 	cur->bc_rec.rc.rc_blockcount = 0;
@@ -80,7 +80,7 @@ xfs_refcount_lookup_eq(
 	xfs_agblock_t		bno,
 	int			*stat)
 {
-	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
+	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.agno, bno,
 			XFS_LOOKUP_LE);
 	cur->bc_rec.rc.rc_startblock = bno;
 	cur->bc_rec.rc.rc_blockcount = 0;
@@ -108,7 +108,7 @@ xfs_refcount_get_rec(
 	int			*stat)
 {
 	struct xfs_mount	*mp = cur->bc_mp;
-	xfs_agnumber_t		agno = cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = cur->bc_ag.agno;
 	union xfs_btree_rec	*rec;
 	int			error;
 	xfs_agblock_t		realstart;
@@ -119,7 +119,7 @@ xfs_refcount_get_rec(
 
 	xfs_refcount_btrec_to_irec(rec, irec);
 
-	agno = cur->bc_private.a.agno;
+	agno = cur->bc_ag.agno;
 	if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN)
 		goto out_bad_rec;
 
@@ -144,7 +144,7 @@ xfs_refcount_get_rec(
 	if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT)
 		goto out_bad_rec;
 
-	trace_xfs_refcount_get(cur->bc_mp, cur->bc_private.a.agno, irec);
+	trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.agno, irec);
 	return 0;
 
 out_bad_rec:
@@ -169,14 +169,14 @@ xfs_refcount_update(
 	union xfs_btree_rec	rec;
 	int			error;
 
-	trace_xfs_refcount_update(cur->bc_mp, cur->bc_private.a.agno, irec);
+	trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.agno, irec);
 	rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
 	rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
 	rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
 	error = xfs_btree_update(cur, &rec);
 	if (error)
 		trace_xfs_refcount_update_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -193,7 +193,7 @@ xfs_refcount_insert(
 {
 	int				error;
 
-	trace_xfs_refcount_insert(cur->bc_mp, cur->bc_private.a.agno, irec);
+	trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.agno, irec);
 	cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
 	cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
 	cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
@@ -208,7 +208,7 @@ xfs_refcount_insert(
 out_error:
 	if (error)
 		trace_xfs_refcount_insert_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -234,7 +234,7 @@ xfs_refcount_delete(
 		error = -EFSCORRUPTED;
 		goto out_error;
 	}
-	trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
+	trace_xfs_refcount_delete(cur->bc_mp, cur->bc_ag.agno, &irec);
 	error = xfs_btree_delete(cur, i);
 	if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
 		error = -EFSCORRUPTED;
@@ -246,7 +246,7 @@ xfs_refcount_delete(
 out_error:
 	if (error)
 		trace_xfs_refcount_delete_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -366,7 +366,7 @@ xfs_refcount_split_extent(
 		return 0;
 
 	*shape_changed = true;
-	trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_ag.agno,
 			&rcext, agbno);
 
 	/* Establish the right extent. */
@@ -391,7 +391,7 @@ xfs_refcount_split_extent(
 
 out_error:
 	trace_xfs_refcount_split_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -411,7 +411,7 @@ xfs_refcount_merge_center_extents(
 	int				found_rec;
 
 	trace_xfs_refcount_merge_center_extents(cur->bc_mp,
-			cur->bc_private.a.agno, left, center, right);
+			cur->bc_ag.agno, left, center, right);
 
 	/*
 	 * Make sure the center and right extents are not in the btree.
@@ -468,7 +468,7 @@ xfs_refcount_merge_center_extents(
 
 out_error:
 	trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -487,7 +487,7 @@ xfs_refcount_merge_left_extent(
 	int				found_rec;
 
 	trace_xfs_refcount_merge_left_extent(cur->bc_mp,
-			cur->bc_private.a.agno, left, cleft);
+			cur->bc_ag.agno, left, cleft);
 
 	/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
 	if (cleft->rc_refcount > 1) {
@@ -530,7 +530,7 @@ xfs_refcount_merge_left_extent(
 
 out_error:
 	trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -548,7 +548,7 @@ xfs_refcount_merge_right_extent(
 	int				found_rec;
 
 	trace_xfs_refcount_merge_right_extent(cur->bc_mp,
-			cur->bc_private.a.agno, cright, right);
+			cur->bc_ag.agno, cright, right);
 
 	/*
 	 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
@@ -594,7 +594,7 @@ xfs_refcount_merge_right_extent(
 
 out_error:
 	trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -679,13 +679,13 @@ xfs_refcount_find_left_extents(
 		cleft->rc_blockcount = aglen;
 		cleft->rc_refcount = 1;
 	}
-	trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.agno,
 			left, cleft, agbno);
 	return error;
 
 out_error:
 	trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -768,13 +768,13 @@ xfs_refcount_find_right_extents(
 		cright->rc_blockcount = aglen;
 		cright->rc_refcount = 1;
 	}
-	trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.agno,
 			cright, right, agbno + aglen);
 	return error;
 
 out_error:
 	trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -883,7 +883,7 @@ xfs_refcount_still_have_space(
 {
 	unsigned long			overhead;
 
-	overhead = cur->bc_private.a.priv.refc.shape_changes *
+	overhead = cur->bc_ag.refc.shape_changes *
 			xfs_allocfree_log_count(cur->bc_mp, 1);
 	overhead *= cur->bc_mp->m_sb.sb_blocksize;
 
@@ -891,17 +891,17 @@ xfs_refcount_still_have_space(
 	 * Only allow 2 refcount extent updates per transaction if the
 	 * refcount continue update "error" has been injected.
 	 */
-	if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
+	if (cur->bc_ag.refc.nr_ops > 2 &&
 	    XFS_TEST_ERROR(false, cur->bc_mp,
 			XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
 		return false;
 
-	if (cur->bc_private.a.priv.refc.nr_ops == 0)
+	if (cur->bc_ag.refc.nr_ops == 0)
 		return true;
 	else if (overhead > cur->bc_tp->t_log_res)
 		return false;
 	return  cur->bc_tp->t_log_res - overhead >
-		cur->bc_private.a.priv.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
+		cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
 }
 
 /*
@@ -952,7 +952,7 @@ xfs_refcount_adjust_extents(
 					ext.rc_startblock - *agbno);
 			tmp.rc_refcount = 1 + adj;
 			trace_xfs_refcount_modify_extent(cur->bc_mp,
-					cur->bc_private.a.agno, &tmp);
+					cur->bc_ag.agno, &tmp);
 
 			/*
 			 * Either cover the hole (increment) or
@@ -968,10 +968,10 @@ xfs_refcount_adjust_extents(
 				error = -EFSCORRUPTED;
 				goto out_error;
 			}
-			cur->bc_private.a.priv.refc.nr_ops++;
+			cur->bc_ag.refc.nr_ops++;
 		} else {
 			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
-					cur->bc_private.a.agno,
+					cur->bc_ag.agno,
 					tmp.rc_startblock);
 			xfs_bmap_add_free(cur->bc_tp, fsbno,
 					tmp.rc_blockcount, oinfo);
@@ -998,12 +998,12 @@ xfs_refcount_adjust_extents(
 			goto skip;
 		ext.rc_refcount += adj;
 		trace_xfs_refcount_modify_extent(cur->bc_mp,
-				cur->bc_private.a.agno, &ext);
+				cur->bc_ag.agno, &ext);
 		if (ext.rc_refcount > 1) {
 			error = xfs_refcount_update(cur, &ext);
 			if (error)
 				goto out_error;
-			cur->bc_private.a.priv.refc.nr_ops++;
+			cur->bc_ag.refc.nr_ops++;
 		} else if (ext.rc_refcount == 1) {
 			error = xfs_refcount_delete(cur, &found_rec);
 			if (error)
@@ -1012,11 +1012,11 @@ xfs_refcount_adjust_extents(
 				error = -EFSCORRUPTED;
 				goto out_error;
 			}
-			cur->bc_private.a.priv.refc.nr_ops++;
+			cur->bc_ag.refc.nr_ops++;
 			goto advloop;
 		} else {
 			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
-					cur->bc_private.a.agno,
+					cur->bc_ag.agno,
 					ext.rc_startblock);
 			xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
 					oinfo);
@@ -1035,7 +1035,7 @@ xfs_refcount_adjust_extents(
 	return error;
 out_error:
 	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -1057,10 +1057,10 @@ xfs_refcount_adjust(
 	*new_agbno = agbno;
 	*new_aglen = aglen;
 	if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
-		trace_xfs_refcount_increase(cur->bc_mp, cur->bc_private.a.agno,
+		trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.agno,
 				agbno, aglen);
 	else
-		trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_private.a.agno,
+		trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.agno,
 				agbno, aglen);
 
 	/*
@@ -1088,7 +1088,7 @@ xfs_refcount_adjust(
 		if (shape_changed)
 			shape_changes++;
 	if (shape_changes)
-		cur->bc_private.a.priv.refc.shape_changes++;
+		cur->bc_ag.refc.shape_changes++;
 
 	/* Now that we've taken care of the ends, adjust the middle extents */
 	error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
@@ -1099,7 +1099,7 @@ xfs_refcount_adjust(
 	return 0;
 
 out_error:
-	trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_ag.agno,
 			error, _RET_IP_);
 	return error;
 }
@@ -1115,7 +1115,7 @@ xfs_refcount_finish_one_cleanup(
 
 	if (rcur == NULL)
 		return;
-	agbp = rcur->bc_private.a.agbp;
+	agbp = rcur->bc_ag.agbp;
 	xfs_btree_del_cursor(rcur, error);
 	if (error)
 		xfs_trans_brelse(tp, agbp);
@@ -1165,9 +1165,9 @@ xfs_refcount_finish_one(
 	 * the startblock, get one now.
 	 */
 	rcur = *pcur;
-	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
-		nr_ops = rcur->bc_private.a.priv.refc.nr_ops;
-		shape_changes = rcur->bc_private.a.priv.refc.shape_changes;
+	if (rcur != NULL && rcur->bc_ag.agno != agno) {
+		nr_ops = rcur->bc_ag.refc.nr_ops;
+		shape_changes = rcur->bc_ag.refc.shape_changes;
 		xfs_refcount_finish_one_cleanup(tp, rcur, 0);
 		rcur = NULL;
 		*pcur = NULL;
@@ -1183,8 +1183,8 @@ xfs_refcount_finish_one(
 			error = -ENOMEM;
 			goto out_cur;
 		}
-		rcur->bc_private.a.priv.refc.nr_ops = nr_ops;
-		rcur->bc_private.a.priv.refc.shape_changes = shape_changes;
+		rcur->bc_ag.refc.nr_ops = nr_ops;
+		rcur->bc_ag.refc.shape_changes = shape_changes;
 	}
 	*pcur = rcur;
 
@@ -1303,7 +1303,7 @@ xfs_refcount_find_shared(
 	int			have;
 	int			error;
 
-	trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_ag.agno,
 			agbno, aglen);
 
 	/* By default, skip the whole range */
@@ -1383,12 +1383,12 @@ xfs_refcount_find_shared(
 
 done:
 	trace_xfs_refcount_find_shared_result(cur->bc_mp,
-			cur->bc_private.a.agno, *fbno, *flen);
+			cur->bc_ag.agno, *fbno, *flen);
 
 out_error:
 	if (error)
 		trace_xfs_refcount_find_shared_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -1485,7 +1485,7 @@ xfs_refcount_adjust_cow_extents(
 		tmp.rc_blockcount = aglen;
 		tmp.rc_refcount = 1;
 		trace_xfs_refcount_modify_extent(cur->bc_mp,
-				cur->bc_private.a.agno, &tmp);
+				cur->bc_ag.agno, &tmp);
 
 		error = xfs_refcount_insert(cur, &tmp,
 				&found_tmp);
@@ -1513,7 +1513,7 @@ xfs_refcount_adjust_cow_extents(
 
 		ext.rc_refcount = 0;
 		trace_xfs_refcount_modify_extent(cur->bc_mp,
-				cur->bc_private.a.agno, &ext);
+				cur->bc_ag.agno, &ext);
 		error = xfs_refcount_delete(cur, &found_rec);
 		if (error)
 			goto out_error;
@@ -1529,7 +1529,7 @@ xfs_refcount_adjust_cow_extents(
 	return error;
 out_error:
 	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
-			cur->bc_private.a.agno, error, _RET_IP_);
+			cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -1575,7 +1575,7 @@ xfs_refcount_adjust_cow(
 	return 0;
 
 out_error:
-	trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_ag.agno,
 			error, _RET_IP_);
 	return error;
 }
@@ -1589,7 +1589,7 @@ __xfs_refcount_cow_alloc(
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		aglen)
 {
-	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
+	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_ag.agno,
 			agbno, aglen);
 
 	/* Add refcount btree reservation */
@@ -1606,7 +1606,7 @@ __xfs_refcount_cow_free(
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		aglen)
 {
-	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
+	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_ag.agno,
 			agbno, aglen);
 
 	/* Remove refcount btree reservation */
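Note: all of the churn above is one mechanical rename: cur->bc_private.a.X
becomes cur->bc_ag.X, and the .priv.refc indirection collapses to .refc as the
cursor's per-type private data moves into anonymous unions. Roughly the shape
of the AG-side private area after this series (a paraphrase based on the fields
used in these hunks, not a verbatim copy of xfs_btree.h; other btree types
keep their own members alongside refc):

    struct xfs_btree_cur_ag {
    	union {
    		struct xfs_buf		*agbp;	/* live AGF/AGI buffer */
    		struct xbtree_afakeroot	*afake;	/* fake root for staging */
    	};
    	xfs_agnumber_t			agno;
    	struct {
    		unsigned long		nr_ops;		/* refcountbt updates */
    		int			shape_changes;
    	} refc;
    };
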
@@ -12,6 +12,7 @@
 #include "xfs_sb.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_refcount_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
@@ -25,7 +26,7 @@ xfs_refcountbt_dup_cursor(
 	struct xfs_btree_cur	*cur)
 {
 	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agbp, cur->bc_private.a.agno);
+			cur->bc_ag.agbp, cur->bc_ag.agno);
 }
 
 STATIC void
@@ -34,8 +35,8 @@ xfs_refcountbt_set_root(
 	union xfs_btree_ptr	*ptr,
 	int			inc)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
 	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);
 
@@ -57,8 +58,8 @@ xfs_refcountbt_alloc_block(
 	union xfs_btree_ptr	*new,
 	int			*stat)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	struct xfs_alloc_arg	args;		/* block allocation args */
 	int			error;		/* error return value */
 
@@ -66,7 +67,7 @@ xfs_refcountbt_alloc_block(
 	args.tp = cur->bc_tp;
 	args.mp = cur->bc_mp;
 	args.type = XFS_ALLOCTYPE_NEAR_BNO;
-	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
 			xfs_refc_block(args.mp));
 	args.oinfo = XFS_RMAP_OINFO_REFC;
 	args.minlen = args.maxlen = args.prod = 1;
@@ -75,13 +76,13 @@ xfs_refcountbt_alloc_block(
 	error = xfs_alloc_vextent(&args);
 	if (error)
 		goto out_error;
-	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
 			args.agbno, 1);
 	if (args.fsbno == NULLFSBLOCK) {
 		*stat = 0;
 		return 0;
 	}
-	ASSERT(args.agno == cur->bc_private.a.agno);
+	ASSERT(args.agno == cur->bc_ag.agno);
 	ASSERT(args.len == 1);
 
 	new->s = cpu_to_be32(args.agbno);
@@ -101,12 +102,12 @@ xfs_refcountbt_free_block(
 	struct xfs_buf		*bp)
 {
 	struct xfs_mount	*mp = cur->bc_mp;
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
 	int			error;
 
-	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.agno,
 			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
 	be32_add_cpu(&agf->agf_refcount_blocks, -1);
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
@@ -169,9 +170,9 @@ xfs_refcountbt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
 
-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
 
 	ptr->s = agf->agf_refcount_root;
 }
@@ -311,8 +312,36 @@ static const struct xfs_btree_ops xfs_refcountbt_ops = {
 };
 
 /*
- * Allocate a new refcount btree cursor.
+ * Initialize a new refcount btree cursor.
  */
+static struct xfs_btree_cur *
+xfs_refcountbt_init_common(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_btree_cur	*cur;
+
+	ASSERT(agno != NULLAGNUMBER);
+	ASSERT(agno < mp->m_sb.sb_agcount);
+
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_btnum = XFS_BTNUM_REFC;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
+
+	cur->bc_ag.agno = agno;
+	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+
+	cur->bc_ag.refc.nr_ops = 0;
+	cur->bc_ag.refc.shape_changes = 0;
+	cur->bc_ops = &xfs_refcountbt_ops;
+	return cur;
+}
+
+/* Create a btree cursor. */
 struct xfs_btree_cur *
 xfs_refcountbt_init_cursor(
 	struct xfs_mount	*mp,
@@ -320,32 +349,53 @@ xfs_refcountbt_init_cursor(
 	struct xfs_buf		*agbp,
 	xfs_agnumber_t		agno)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_agf		*agf = agbp->b_addr;
 	struct xfs_btree_cur	*cur;
 
-	ASSERT(agno != NULLAGNUMBER);
-	ASSERT(agno < mp->m_sb.sb_agcount);
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
-
-	cur->bc_tp = tp;
-	cur->bc_mp = mp;
-	cur->bc_btnum = XFS_BTNUM_REFC;
-	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-	cur->bc_ops = &xfs_refcountbt_ops;
-	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
-
+	cur = xfs_refcountbt_init_common(mp, tp, agno);
 	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
-
-	cur->bc_private.a.agbp = agbp;
-	cur->bc_private.a.agno = agno;
-	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
-
-	cur->bc_private.a.priv.refc.nr_ops = 0;
-	cur->bc_private.a.priv.refc.shape_changes = 0;
-
+	cur->bc_ag.agbp = agbp;
 	return cur;
 }
 
+/* Create a btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_refcountbt_stage_cursor(
+	struct xfs_mount	*mp,
+	struct xbtree_afakeroot	*afake,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_refcountbt_init_common(mp, NULL, agno);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Swap in the new btree root.  Once we pass this point the newly rebuilt btree
+ * is in place and we have to kill off all the old btree blocks.
+ */
+void
+xfs_refcountbt_commit_staged_btree(
+	struct xfs_btree_cur	*cur,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp)
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	agf->agf_refcount_root = cpu_to_be32(afake->af_root);
+	agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
+	agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
+				    XFS_AGF_REFCOUNT_ROOT |
+				    XFS_AGF_REFCOUNT_LEVEL);
+	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
+}
+
 /*
  * Calculate the number of records in a refcount btree block.
  */
@@ -420,7 +470,7 @@ xfs_refcountbt_calc_reserves(
 	if (error)
 		return error;
 
-	agf = XFS_BUF_TO_AGF(agbp);
+	agf = agbp->b_addr;
 	agblocks = be32_to_cpu(agf->agf_length);
 	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
 	xfs_trans_brelse(tp, agbp);
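Note: the stage/commit pair added above exists for online repair: a complete
replacement btree is built off to one side under a fake root, then swapped in
atomically. A hypothetical repair flow using only the entry points visible in
this hunk (the record bulk loader itself is elided, and the zero-initialized
afake is an illustrative assumption):

    struct xbtree_afakeroot	afake = { 0 };
    struct xfs_btree_cur	*cur;

    cur = xfs_refcountbt_stage_cursor(mp, &afake, agno);
    /* ... bulk-load the new records into the staged tree ... */
    xfs_refcountbt_commit_staged_btree(cur, tp, agbp);
    xfs_btree_del_cursor(cur, 0);
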
@@ -13,6 +13,7 @@
 struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
+struct xbtree_afakeroot;
 
 /*
  * Btree block header size
@@ -46,6 +47,8 @@ struct xfs_mount;
 extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
 		struct xfs_trans *tp, struct xfs_buf *agbp,
 		xfs_agnumber_t agno);
+struct xfs_btree_cur *xfs_refcountbt_stage_cursor(struct xfs_mount *mp,
+		struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
 extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
 extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
 
@@ -58,4 +61,7 @@ extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
 		struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
 		xfs_extlen_t *used);
 
+void xfs_refcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
+		struct xfs_trans *tp, struct xfs_buf *agbp);
+
 #endif	/* __XFS_REFCOUNT_BTREE_H__ */
@@ -79,7 +79,7 @@ xfs_rmap_update(
 	union xfs_btree_rec	rec;
 	int			error;
 
-	trace_xfs_rmap_update(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.agno,
 			irec->rm_startblock, irec->rm_blockcount,
 			irec->rm_owner, irec->rm_offset, irec->rm_flags);
 
@@ -91,7 +91,7 @@ xfs_rmap_update(
 	error = xfs_btree_update(cur, &rec);
 	if (error)
 		trace_xfs_rmap_update_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -107,7 +107,7 @@ xfs_rmap_insert(
 	int			i;
 	int			error;
 
-	trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+	trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.agno, agbno,
 			len, owner, offset, flags);
 
 	error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -133,7 +133,7 @@ xfs_rmap_insert(
 done:
 	if (error)
 		trace_xfs_rmap_insert_error(rcur->bc_mp,
-				rcur->bc_private.a.agno, error, _RET_IP_);
+				rcur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -149,7 +149,7 @@ xfs_rmap_delete(
 	int			i;
 	int			error;
 
-	trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+	trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.agno, agbno,
 			len, owner, offset, flags);
 
 	error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
@@ -170,7 +170,7 @@ xfs_rmap_delete(
 done:
 	if (error)
 		trace_xfs_rmap_delete_error(rcur->bc_mp,
-				rcur->bc_private.a.agno, error, _RET_IP_);
+				rcur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -197,7 +197,7 @@ xfs_rmap_get_rec(
 	int			*stat)
 {
 	struct xfs_mount	*mp = cur->bc_mp;
-	xfs_agnumber_t		agno = cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = cur->bc_ag.agno;
 	union xfs_btree_rec	*rec;
 	int			error;
 
@@ -260,7 +260,7 @@ xfs_rmap_find_left_neighbor_helper(
 	struct xfs_find_left_neighbor_info	*info = priv;
 
 	trace_xfs_rmap_find_left_neighbor_candidate(cur->bc_mp,
-			cur->bc_private.a.agno, rec->rm_startblock,
+			cur->bc_ag.agno, rec->rm_startblock,
 			rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
 			rec->rm_flags);
 
@@ -312,7 +312,7 @@ xfs_rmap_find_left_neighbor(
 	info.stat = stat;
 
 	trace_xfs_rmap_find_left_neighbor_query(cur->bc_mp,
-			cur->bc_private.a.agno, bno, 0, owner, offset, flags);
+			cur->bc_ag.agno, bno, 0, owner, offset, flags);
 
 	error = xfs_rmap_query_range(cur, &info.high, &info.high,
 			xfs_rmap_find_left_neighbor_helper, &info);
@@ -320,7 +320,7 @@ xfs_rmap_find_left_neighbor(
 		error = 0;
 	if (*stat)
 		trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
-			cur->bc_private.a.agno, irec->rm_startblock,
+			cur->bc_ag.agno, irec->rm_startblock,
 			irec->rm_blockcount, irec->rm_owner,
 			irec->rm_offset, irec->rm_flags);
 	return error;
@@ -336,7 +336,7 @@ xfs_rmap_lookup_le_range_helper(
 	struct xfs_find_left_neighbor_info	*info = priv;
 
 	trace_xfs_rmap_lookup_le_range_candidate(cur->bc_mp,
-			cur->bc_private.a.agno, rec->rm_startblock,
+			cur->bc_ag.agno, rec->rm_startblock,
 			rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
 			rec->rm_flags);
 
@@ -385,14 +385,14 @@ xfs_rmap_lookup_le_range(
 	info.stat = stat;
 
 	trace_xfs_rmap_lookup_le_range(cur->bc_mp,
-			cur->bc_private.a.agno, bno, 0, owner, offset, flags);
+			cur->bc_ag.agno, bno, 0, owner, offset, flags);
 	error = xfs_rmap_query_range(cur, &info.high, &info.high,
 			xfs_rmap_lookup_le_range_helper, &info);
 	if (error == -ECANCELED)
 		error = 0;
 	if (*stat)
 		trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
-			cur->bc_private.a.agno, irec->rm_startblock,
+			cur->bc_ag.agno, irec->rm_startblock,
 			irec->rm_blockcount, irec->rm_owner,
 			irec->rm_offset, irec->rm_flags);
 	return error;
@@ -498,7 +498,7 @@ xfs_rmap_unmap(
 			(flags & XFS_RMAP_BMBT_BLOCK);
 	if (unwritten)
 		flags |= XFS_RMAP_UNWRITTEN;
-	trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 
 	/*
@@ -522,7 +522,7 @@ xfs_rmap_unmap(
 		goto out_error;
 	}
 	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
-			cur->bc_private.a.agno, ltrec.rm_startblock,
+			cur->bc_ag.agno, ltrec.rm_startblock,
 			ltrec.rm_blockcount, ltrec.rm_owner,
 			ltrec.rm_offset, ltrec.rm_flags);
 	ltoff = ltrec.rm_offset;
@@ -588,7 +588,7 @@ xfs_rmap_unmap(
 
 	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
 		/* exact match, simply remove the record from rmap tree */
-		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
 				ltrec.rm_startblock, ltrec.rm_blockcount,
 				ltrec.rm_owner, ltrec.rm_offset,
 				ltrec.rm_flags);
@@ -666,7 +666,7 @@ xfs_rmap_unmap(
 		else
 			cur->bc_rec.r.rm_offset = offset + len;
 		cur->bc_rec.r.rm_flags = flags;
-		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
 				cur->bc_rec.r.rm_startblock,
 				cur->bc_rec.r.rm_blockcount,
 				cur->bc_rec.r.rm_owner,
@@ -678,11 +678,11 @@ xfs_rmap_unmap(
 	}
 
 out_done:
-	trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 out_error:
 	if (error)
-		trace_xfs_rmap_unmap_error(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_unmap_error(mp, cur->bc_ag.agno,
 				error, _RET_IP_);
 	return error;
 }
@@ -773,7 +773,7 @@ xfs_rmap_map(
 			(flags & XFS_RMAP_BMBT_BLOCK);
 	if (unwritten)
 		flags |= XFS_RMAP_UNWRITTEN;
-	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 	ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
 
@@ -795,7 +795,7 @@ xfs_rmap_map(
 		goto out_error;
 	}
 	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
-			cur->bc_private.a.agno, ltrec.rm_startblock,
+			cur->bc_ag.agno, ltrec.rm_startblock,
 			ltrec.rm_blockcount, ltrec.rm_owner,
 			ltrec.rm_offset, ltrec.rm_flags);
 
@@ -831,7 +831,7 @@ xfs_rmap_map(
 			goto out_error;
 		}
 		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
-			cur->bc_private.a.agno, gtrec.rm_startblock,
+			cur->bc_ag.agno, gtrec.rm_startblock,
 			gtrec.rm_blockcount, gtrec.rm_owner,
 			gtrec.rm_offset, gtrec.rm_flags);
 		if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
@@ -870,7 +870,7 @@ xfs_rmap_map(
 			 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
 			 */
 			ltrec.rm_blockcount += gtrec.rm_blockcount;
-			trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+			trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
 					gtrec.rm_startblock,
 					gtrec.rm_blockcount,
 					gtrec.rm_owner,
@@ -921,7 +921,7 @@ xfs_rmap_map(
 		cur->bc_rec.r.rm_owner = owner;
 		cur->bc_rec.r.rm_offset = offset;
 		cur->bc_rec.r.rm_flags = flags;
-		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+		trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
 			owner, offset, flags);
 		error = xfs_btree_insert(cur, &i);
 		if (error)
@@ -932,11 +932,11 @@ xfs_rmap_map(
 		}
 	}
 
-	trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 out_error:
 	if (error)
-		trace_xfs_rmap_map_error(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_map_error(mp, cur->bc_ag.agno,
 				error, _RET_IP_);
 	return error;
 }
@@ -1010,7 +1010,7 @@ xfs_rmap_convert(
 		(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
 	oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
 	new_endoff = offset + len;
-	trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 
 	/*
@@ -1034,7 +1034,7 @@ xfs_rmap_convert(
 		goto done;
 	}
 	trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
-			cur->bc_private.a.agno, PREV.rm_startblock,
+			cur->bc_ag.agno, PREV.rm_startblock,
 			PREV.rm_blockcount, PREV.rm_owner,
 			PREV.rm_offset, PREV.rm_flags);
 
@@ -1076,7 +1076,7 @@ xfs_rmap_convert(
 			goto done;
 		}
 		trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
-				cur->bc_private.a.agno, LEFT.rm_startblock,
+				cur->bc_ag.agno, LEFT.rm_startblock,
 				LEFT.rm_blockcount, LEFT.rm_owner,
 				LEFT.rm_offset, LEFT.rm_flags);
 		if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
@@ -1114,7 +1114,7 @@ xfs_rmap_convert(
 			goto done;
 		}
 		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
-				cur->bc_private.a.agno, RIGHT.rm_startblock,
+				cur->bc_ag.agno, RIGHT.rm_startblock,
 				RIGHT.rm_blockcount, RIGHT.rm_owner,
 				RIGHT.rm_offset, RIGHT.rm_flags);
 		if (bno + len == RIGHT.rm_startblock &&
@@ -1132,7 +1132,7 @@ xfs_rmap_convert(
 	    RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
 		state &= ~RMAP_RIGHT_CONTIG;
 
-	trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+	trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
 			_RET_IP_);
 
 	/* reset the cursor back to PREV */
@@ -1162,7 +1162,7 @@ xfs_rmap_convert(
 			error = -EFSCORRUPTED;
 			goto done;
 		}
-		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
 				RIGHT.rm_startblock, RIGHT.rm_blockcount,
 				RIGHT.rm_owner, RIGHT.rm_offset,
 				RIGHT.rm_flags);
@@ -1180,7 +1180,7 @@ xfs_rmap_convert(
 			error = -EFSCORRUPTED;
 			goto done;
 		}
-		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
 				PREV.rm_startblock, PREV.rm_blockcount,
 				PREV.rm_owner, PREV.rm_offset,
 				PREV.rm_flags);
@@ -1210,7 +1210,7 @@ xfs_rmap_convert(
 		 * Setting all of a previous oldext extent to newext.
 		 * The left neighbor is contiguous, the right is not.
 		 */
-		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
 				PREV.rm_startblock, PREV.rm_blockcount,
 				PREV.rm_owner, PREV.rm_offset,
 				PREV.rm_flags);
@@ -1247,7 +1247,7 @@ xfs_rmap_convert(
 			error = -EFSCORRUPTED;
 			goto done;
 		}
-		trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_delete(mp, cur->bc_ag.agno,
 				RIGHT.rm_startblock, RIGHT.rm_blockcount,
 				RIGHT.rm_owner, RIGHT.rm_offset,
 				RIGHT.rm_flags);
@@ -1326,7 +1326,7 @@ xfs_rmap_convert(
 		NEW.rm_blockcount = len;
 		NEW.rm_flags = newext;
 		cur->bc_rec.r = NEW;
-		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+		trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
 				len, owner, offset, newext);
 		error = xfs_btree_insert(cur, &i);
 		if (error)
@@ -1383,7 +1383,7 @@ xfs_rmap_convert(
 		NEW.rm_blockcount = len;
 		NEW.rm_flags = newext;
 		cur->bc_rec.r = NEW;
-		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+		trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno,
 				len, owner, offset, newext);
 		error = xfs_btree_insert(cur, &i);
 		if (error)
@@ -1414,7 +1414,7 @@ xfs_rmap_convert(
 		NEW = PREV;
 		NEW.rm_blockcount = offset - PREV.rm_offset;
 		cur->bc_rec.r = NEW;
-		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+		trace_xfs_rmap_insert(mp, cur->bc_ag.agno,
 				NEW.rm_startblock, NEW.rm_blockcount,
 				NEW.rm_owner, NEW.rm_offset,
 				NEW.rm_flags);
@@ -1441,7 +1441,7 @@ xfs_rmap_convert(
 		/* new middle extent - newext */
 		cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
 		cur->bc_rec.r.rm_flags |= newext;
-		trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+		trace_xfs_rmap_insert(mp, cur->bc_ag.agno, bno, len,
 				owner, offset, newext);
 		error = xfs_btree_insert(cur, &i);
 		if (error)
@@ -1465,12 +1465,12 @@ xfs_rmap_convert(
 		ASSERT(0);
 	}
 
-	trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 done:
 	if (error)
 		trace_xfs_rmap_convert_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -1506,7 +1506,7 @@ xfs_rmap_convert_shared(
 		(flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
 	oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
 	new_endoff = offset + len;
-	trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_convert(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 
 	/*
@@ -1573,7 +1573,7 @@ xfs_rmap_convert_shared(
 			goto done;
 		}
 		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
-				cur->bc_private.a.agno, RIGHT.rm_startblock,
+				cur->bc_ag.agno, RIGHT.rm_startblock,
 				RIGHT.rm_blockcount, RIGHT.rm_owner,
 				RIGHT.rm_offset, RIGHT.rm_flags);
 		if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
@@ -1589,7 +1589,7 @@ xfs_rmap_convert_shared(
 	    RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
 		state &= ~RMAP_RIGHT_CONTIG;
 
-	trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+	trace_xfs_rmap_convert_state(mp, cur->bc_ag.agno, state,
 			_RET_IP_);
 	/*
 	 * Switch out based on the FILLING and CONTIG state bits.
@@ -1880,12 +1880,12 @@ xfs_rmap_convert_shared(
 		ASSERT(0);
 	}
 
-	trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_convert_done(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 done:
 	if (error)
 		trace_xfs_rmap_convert_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -1923,7 +1923,7 @@ xfs_rmap_unmap_shared(
 	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
 	if (unwritten)
 		flags |= XFS_RMAP_UNWRITTEN;
-	trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_unmap(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 
 	/*
@@ -2072,12 +2072,12 @@ xfs_rmap_unmap_shared(
 		goto out_error;
 	}
 
-	trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_unmap_done(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 out_error:
 	if (error)
 		trace_xfs_rmap_unmap_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -2112,7 +2112,7 @@ xfs_rmap_map_shared(
 	xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
 	if (unwritten)
 		flags |= XFS_RMAP_UNWRITTEN;
-	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_map(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 
 	/* Is there a left record that abuts our range? */
@@ -2138,7 +2138,7 @@ xfs_rmap_map_shared(
 			goto out_error;
 		}
 		trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
-				cur->bc_private.a.agno, gtrec.rm_startblock,
+				cur->bc_ag.agno, gtrec.rm_startblock,
 				gtrec.rm_blockcount, gtrec.rm_owner,
 				gtrec.rm_offset, gtrec.rm_flags);
 
@@ -2231,12 +2231,12 @@ xfs_rmap_map_shared(
 		goto out_error;
 	}
 
-	trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+	trace_xfs_rmap_map_done(mp, cur->bc_ag.agno, bno, len,
 			unwritten, oinfo);
 out_error:
 	if (error)
 		trace_xfs_rmap_map_error(cur->bc_mp,
-				cur->bc_private.a.agno, error, _RET_IP_);
+				cur->bc_ag.agno, error, _RET_IP_);
 	return error;
 }
 
@@ -2336,7 +2336,7 @@ xfs_rmap_finish_one_cleanup(
 
 	if (rcur == NULL)
 		return;
-	agbp = rcur->bc_private.a.agbp;
+	agbp = rcur->bc_ag.agbp;
 	xfs_btree_del_cursor(rcur, error);
 	if (error)
 		xfs_trans_brelse(tp, agbp);
@@ -2386,7 +2386,7 @@ xfs_rmap_finish_one(
 	 * the startblock, get one now.
 	 */
 	rcur = *pcur;
-	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
+	if (rcur != NULL && rcur->bc_ag.agno != agno) {
 		xfs_rmap_finish_one_cleanup(tp, rcur, 0);
 		rcur = NULL;
 		*pcur = NULL;
@@ -2694,7 +2694,6 @@ struct xfs_rmap_key_state {
 	uint64_t			owner;
 	uint64_t			offset;
 	unsigned int			flags;
-	bool				has_rmap;
 };
 
 /* For each rmap given, figure out if it doesn't match the key we want. */
@@ -2709,7 +2708,6 @@ xfs_rmap_has_other_keys_helper(
 	if (rks->owner == rec->rm_owner && rks->offset == rec->rm_offset &&
 	    ((rks->flags & rec->rm_flags) & XFS_RMAP_KEY_FLAGS) == rks->flags)
 		return 0;
-	rks->has_rmap = true;
 	return -ECANCELED;
}
 
@@ -2731,7 +2729,7 @@ xfs_rmap_has_other_keys(
 	int			error;
 
 	xfs_owner_info_unpack(oinfo, &rks.owner, &rks.offset, &rks.flags);
-	rks.has_rmap = false;
+	*has_rmap = false;
 
 	low.rm_startblock = bno;
 	memset(&high, 0xFF, sizeof(high));
@@ -2739,11 +2737,12 @@ xfs_rmap_has_other_keys(
 
 	error = xfs_rmap_query_range(cur, &low, &high,
 			xfs_rmap_has_other_keys_helper, &rks);
-	if (error < 0)
-		return error;
+	if (error == -ECANCELED) {
+		*has_rmap = true;
+		return 0;
+	}
 
-	*has_rmap = rks.has_rmap;
-	return 0;
+	return error;
 }
 
 const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
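Note: the last three hunks drop the has_rmap bookkeeping from struct
xfs_rmap_key_state. The helper now aborts the range query with -ECANCELED as
soon as it sees a record with a foreign key, and the caller maps that sentinel
back to a boolean. The convention, restated as a minimal pattern (names taken
from the hunks above):

    error = xfs_rmap_query_range(cur, &low, &high,
    		xfs_rmap_has_other_keys_helper, &rks);
    if (error == -ECANCELED) {
    	/* helper stopped early: some record has another owner/key */
    	*has_rmap = true;
    	return 0;
    }
    return error;	/* 0 (no foreign keys found) or a real failure */
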
@@ -14,6 +14,7 @@
 #include "xfs_trans.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_rmap.h"
 #include "xfs_rmap_btree.h"
 #include "xfs_trace.h"
@@ -51,7 +52,7 @@ xfs_rmapbt_dup_cursor(
 	struct xfs_btree_cur	*cur)
 {
 	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agbp, cur->bc_private.a.agno);
+			cur->bc_ag.agbp, cur->bc_ag.agno);
 }
 
 STATIC void
@@ -60,8 +61,8 @@ xfs_rmapbt_set_root(
 	union xfs_btree_ptr	*ptr,
 	int			inc)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
 	int			btnum = cur->bc_btnum;
 	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);
@@ -83,25 +84,25 @@ xfs_rmapbt_alloc_block(
 	union xfs_btree_ptr	*new,
 	int			*stat)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	int			error;
 	xfs_agblock_t		bno;
 
 	/* Allocate the new block from the freelist. If we can't, give up. */
-	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
 				       &bno, 1);
 	if (error)
 		return error;
 
-	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
 			bno, 1);
 	if (bno == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
 
-	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
+	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1,
 			      false);
 
 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
@@ -109,7 +110,7 @@ xfs_rmapbt_alloc_block(
 	be32_add_cpu(&agf->agf_rmap_blocks, 1);
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
 
-	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_private.a.agno);
+	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);
 
 	*stat = 1;
 	return 0;
@@ -120,13 +121,13 @@ xfs_rmapbt_free_block(
 	struct xfs_btree_cur	*cur,
 	struct xfs_buf		*bp)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	xfs_agblock_t		bno;
 	int			error;
 
 	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
-	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno,
 			bno, 1);
 	be32_add_cpu(&agf->agf_rmap_blocks, -1);
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
@@ -138,7 +139,7 @@ xfs_rmapbt_free_block(
 			      XFS_EXTENT_BUSY_SKIP_DISCARD);
 	xfs_trans_agbtree_delta(cur->bc_tp, -1);
 
-	xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_private.a.agno);
+	xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_ag.agno);
 
 	return 0;
 }
@@ -215,9 +216,9 @@ xfs_rmapbt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
 
-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
 
 	ptr->s = agf->agf_roots[cur->bc_btnum];
 }
@@ -448,17 +449,12 @@ static const struct xfs_btree_ops xfs_rmapbt_ops = {
 	.recs_inorder		= xfs_rmapbt_recs_inorder,
 };
 
-/*
- * Allocate a new allocation btree cursor.
- */
-struct xfs_btree_cur *
-xfs_rmapbt_init_cursor(
+static struct xfs_btree_cur *
+xfs_rmapbt_init_common(
 	struct xfs_mount	*mp,
 	struct xfs_trans	*tp,
-	struct xfs_buf		*agbp,
 	xfs_agnumber_t		agno)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
 	struct xfs_btree_cur	*cur;
 
 	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
@@ -468,16 +464,67 @@ xfs_rmapbt_init_cursor(
 	cur->bc_btnum = XFS_BTNUM_RMAP;
 	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-	cur->bc_ops = &xfs_rmapbt_ops;
-	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
 	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
+	cur->bc_ag.agno = agno;
+	cur->bc_ops = &xfs_rmapbt_ops;
 
-	cur->bc_private.a.agbp = agbp;
-	cur->bc_private.a.agno = agno;
-
 	return cur;
 }
 
+/* Create a new reverse mapping btree cursor. */
+struct xfs_btree_cur *
+xfs_rmapbt_init_cursor(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_rmapbt_init_common(mp, tp, agno);
+	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+	cur->bc_ag.agbp = agbp;
+	return cur;
+}
+
+/* Create a new reverse mapping btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_rmapbt_stage_cursor(
+	struct xfs_mount	*mp,
+	struct xbtree_afakeroot	*afake,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_rmapbt_init_common(mp, NULL, agno);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Install a new reverse mapping btree root.  Caller is responsible for
+ * invalidating and freeing the old btree blocks.
+ */
+void
+xfs_rmapbt_commit_staged_btree(
+	struct xfs_btree_cur	*cur,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp)
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
+				    XFS_AGF_RMAP_BLOCKS);
+	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
+}
+
 /*
  * Calculate number of records in an rmap btree block.
 */
@@ -569,7 +616,7 @@ xfs_rmapbt_calc_reserves(
 	if (error)
 		return error;
 
-	agf = XFS_BUF_TO_AGF(agbp);
+	agf = agbp->b_addr;
 	agblocks = be32_to_cpu(agf->agf_length);
 	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
 	xfs_trans_brelse(tp, agbp);
@@ -9,6 +9,7 @@
 struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
+struct xbtree_afakeroot;
 
 /* rmaps only exist on crc enabled filesystems */
 #define XFS_RMAP_BLOCK_LEN	XFS_BTREE_SBLOCK_CRC_LEN
@@ -43,6 +44,10 @@ struct xfs_mount;
 struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
 				struct xfs_trans *tp, struct xfs_buf *bp,
 				xfs_agnumber_t agno);
+struct xfs_btree_cur *xfs_rmapbt_stage_cursor(struct xfs_mount *mp,
+		struct xbtree_afakeroot *afake, xfs_agnumber_t agno);
+void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
+		struct xfs_trans *tp, struct xfs_buf *agbp);
 int xfs_rmapbt_maxrecs(int blocklen, int leaf);
 extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
 
@@ -220,7 +220,7 @@ xfs_validate_sb_common(
 	struct xfs_buf		*bp,
 	struct xfs_sb		*sbp)
 {
-	struct xfs_dsb		*dsb = XFS_BUF_TO_SBP(bp);
+	struct xfs_dsb		*dsb = bp->b_addr;
 	uint32_t		agcount = 0;
 	uint32_t		rem;
 
@@ -681,7 +681,7 @@ xfs_sb_read_verify(
 {
 	struct xfs_sb		sb;
 	struct xfs_mount	*mp = bp->b_mount;
-	struct xfs_dsb		*dsb = XFS_BUF_TO_SBP(bp);
+	struct xfs_dsb		*dsb = bp->b_addr;
 	int			error;
 
 	/*
@@ -707,7 +707,7 @@ xfs_sb_read_verify(
 	 * Check all the superblock fields. Don't byteswap the xquota flags
 	 * because _verify_common checks the on-disk values.
 	 */
-	__xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+	__xfs_sb_from_disk(&sb, dsb, false);
 	error = xfs_validate_sb_common(mp, bp, &sb);
 	if (error)
 		goto out_error;
@@ -730,7 +730,7 @@ static void
 xfs_sb_quiet_read_verify(
 	struct xfs_buf	*bp)
 {
-	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);
+	struct xfs_dsb	*dsb = bp->b_addr;
 
 	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
 		/* XFS filesystem, verify noisily! */
@@ -748,13 +748,14 @@ xfs_sb_write_verify(
 	struct xfs_sb		sb;
 	struct xfs_mount	*mp = bp->b_mount;
 	struct xfs_buf_log_item	*bip = bp->b_log_item;
+	struct xfs_dsb		*dsb = bp->b_addr;
 	int			error;
 
 	/*
 	 * Check all the superblock fields. Don't byteswap the xquota flags
 	 * because _verify_common checks the on-disk values.
 	 */
-	__xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+	__xfs_sb_from_disk(&sb, dsb, false);
 	error = xfs_validate_sb_common(mp, bp, &sb);
 	if (error)
 		goto out_error;
@@ -766,7 +767,7 @@ xfs_sb_write_verify(
 		return;
 
 	if (bip)
-		XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+		dsb->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
 
 	xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
 	return;
@@ -927,7 +928,7 @@ xfs_log_sb(
 	mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
 	mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
 
-	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+	xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 	xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
 }
@@ -1007,7 +1008,7 @@ xfs_update_secondary_sbs(
 		bp->b_ops = &xfs_sb_buf_ops;
 		xfs_buf_oneshot(bp);
 		xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
-		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+		xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
 		xfs_buf_delwri_queue(bp, &buffer_list);
 		xfs_buf_relse(bp);
 
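Note: every XFS_BUF_TO_SBP()/XFS_BUF_TO_AGF()/XFS_BUF_TO_AGI() conversion in
this series works because these headers always live at the start of a
contiguously mapped buffer, so b_addr can be dereferenced directly. The
removed wrappers were thin casts, roughly like the following (a paraphrase of
what they presumably looked like, not quoted from this patch):

    #define XFS_BUF_TO_SBP(bp)	((struct xfs_dsb *)((bp)->b_addr))
    #define XFS_BUF_TO_AGF(bp)	((struct xfs_agf *)((bp)->b_addr))
    #define XFS_BUF_TO_AGI(bp)	((struct xfs_agi *)((bp)->b_addr))
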
@@ -187,7 +187,7 @@ xfs_calc_inode_chunk_res(
 			       XFS_FSB_TO_B(mp, 1));
 	if (alloc) {
 		/* icreate tx uses ordered buffers */
-		if (xfs_sb_version_hascrc(&mp->m_sb))
+		if (xfs_sb_version_has_v3inode(&mp->m_sb))
 			return res;
 		size = XFS_FSB_TO_B(mp, 1);
 	}
|
|||
if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
|
||||
return error;
|
||||
|
||||
sb = XFS_BUF_TO_SBP(bp);
|
||||
sb = bp->b_addr;
|
||||
|
||||
/*
|
||||
* Verify the geometries match. Fields that are permanently
|
||||
|
@ -358,7 +358,7 @@ static inline void
|
|||
xchk_agf_xref_freeblks(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
|
||||
struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
|
||||
xfs_extlen_t blocks = 0;
|
||||
int error;
|
||||
|
||||
|
@ -378,7 +378,7 @@ static inline void
|
|||
xchk_agf_xref_cntbt(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
|
||||
struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
|
||||
xfs_agblock_t agbno;
|
||||
xfs_extlen_t blocks;
|
||||
int have;
|
||||
|
@ -410,7 +410,7 @@ STATIC void
|
|||
xchk_agf_xref_btreeblks(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
|
||||
struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
xfs_agblock_t blocks;
|
||||
xfs_agblock_t btreeblks;
|
||||
|
@ -456,7 +456,7 @@ static inline void
|
|||
xchk_agf_xref_refcblks(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
|
||||
struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
|
||||
xfs_agblock_t blocks;
|
||||
int error;
|
||||
|
||||
|
@ -525,7 +525,7 @@ xchk_agf(
|
|||
goto out;
|
||||
xchk_buffer_recheck(sc, sc->sa.agf_bp);
|
||||
|
||||
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
|
||||
agf = sc->sa.agf_bp->b_addr;
|
||||
|
||||
/* Check the AG length */
|
||||
eoag = be32_to_cpu(agf->agf_length);
|
||||
|
@ -711,7 +711,7 @@ xchk_agfl(
|
|||
goto out;
|
||||
|
||||
/* Allocate buffer to ensure uniqueness of AGFL entries. */
|
||||
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
|
||||
agf = sc->sa.agf_bp->b_addr;
|
||||
agflcount = be32_to_cpu(agf->agf_flcount);
|
||||
if (agflcount > xfs_agfl_size(sc->mp)) {
|
||||
xchk_block_set_corrupt(sc, sc->sa.agf_bp);
|
||||
|
@ -728,7 +728,7 @@ xchk_agfl(
|
|||
}
|
||||
|
||||
/* Check the blocks in the AGFL. */
|
||||
error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
|
||||
error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
|
||||
sc->sa.agfl_bp, xchk_agfl_block, &sai);
|
||||
if (error == -ECANCELED) {
|
||||
error = 0;
|
||||
|
@ -765,7 +765,7 @@ static inline void
|
|||
xchk_agi_xref_icounts(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
|
||||
struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
|
||||
xfs_agino_t icount;
|
||||
xfs_agino_t freecount;
|
||||
int error;
|
||||
|
@ -834,7 +834,7 @@ xchk_agi(
|
|||
goto out;
|
||||
xchk_buffer_recheck(sc, sc->sa.agi_bp);
|
||||
|
||||
agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
|
||||
agi = sc->sa.agi_bp->b_addr;
|
||||
|
||||
/* Check the AG length */
|
||||
eoag = be32_to_cpu(agi->agi_length);
|
||||
|
|
|
@@ -49,7 +49,7 @@ xrep_superblock(
 
 	/* Copy AG 0's superblock to this one. */
 	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
-	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+	xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
 
 	/* Write this to disk. */
 	xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);

@@ -140,7 +140,7 @@ xrep_agf_find_btrees(
 	struct xrep_find_ag_btree	*fab,
 	struct xfs_buf			*agfl_bp)
 {
-	struct xfs_agf			*old_agf = XFS_BUF_TO_AGF(agf_bp);
+	struct xfs_agf			*old_agf = agf_bp->b_addr;
 	int				error;
 
 	/* Go find the root data. */

@@ -181,7 +181,7 @@ xrep_agf_init_header(
 	struct xfs_agf		*old_agf)
 {
 	struct xfs_mount	*mp = sc->mp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agf_bp);
+	struct xfs_agf		*agf = agf_bp->b_addr;
 
 	memcpy(old_agf, agf, sizeof(*old_agf));
 	memset(agf, 0, BBTOB(agf_bp->b_length));

@@ -238,7 +238,7 @@ xrep_agf_calc_from_btrees(
 {
 	struct xrep_agf_allocbt	raa = { .sc = sc };
 	struct xfs_btree_cur	*cur = NULL;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agf_bp);
+	struct xfs_agf		*agf = agf_bp->b_addr;
 	struct xfs_mount	*mp = sc->mp;
 	xfs_agblock_t		btreeblks;
 	xfs_agblock_t		blocks;

@@ -302,7 +302,7 @@ xrep_agf_commit_new(
 	struct xfs_buf		*agf_bp)
 {
 	struct xfs_perag	*pag;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agf_bp);
+	struct xfs_agf		*agf = agf_bp->b_addr;
 
 	/* Trigger fdblocks recalculation */
 	xfs_force_summary_recalc(sc->mp);

@@ -376,7 +376,7 @@ xrep_agf(
 	if (error)
 		return error;
 	agf_bp->b_ops = &xfs_agf_buf_ops;
-	agf = XFS_BUF_TO_AGF(agf_bp);
+	agf = agf_bp->b_addr;
 
 	/*
 	 * Load the AGFL so that we can screen out OWN_AG blocks that are on

@@ -395,7 +395,7 @@ xrep_agf(
 	 * Spot-check the AGFL blocks; if they're obviously corrupt then
 	 * there's nothing we can do but bail out.
 	 */
-	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(agf_bp), agfl_bp,
+	error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp,
 			xrep_agf_check_agfl_block, sc);
 	if (error)
 		return error;

@@ -429,10 +429,10 @@ xrep_agf(
 
 struct xrep_agfl {
 	/* Bitmap of other OWN_AG metadata blocks. */
-	struct xfs_bitmap	agmetablocks;
+	struct xbitmap		agmetablocks;
 
 	/* Bitmap of free space. */
-	struct xfs_bitmap	*freesp;
+	struct xbitmap		*freesp;
 
 	struct xfs_scrub	*sc;
 };

@@ -453,14 +453,14 @@ xrep_agfl_walk_rmap(
 
 	/* Record all the OWN_AG blocks. */
 	if (rec->rm_owner == XFS_RMAP_OWN_AG) {
-		fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+		fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
 				rec->rm_startblock);
-		error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
+		error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
 		if (error)
 			return error;
 	}
 
-	return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
+	return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
 }
 
 /*

@@ -476,19 +476,17 @@ STATIC int
 xrep_agfl_collect_blocks(
 	struct xfs_scrub	*sc,
 	struct xfs_buf		*agf_bp,
-	struct xfs_bitmap	*agfl_extents,
+	struct xbitmap		*agfl_extents,
 	xfs_agblock_t		*flcount)
 {
 	struct xrep_agfl	ra;
 	struct xfs_mount	*mp = sc->mp;
 	struct xfs_btree_cur	*cur;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*n;
 	int			error;
 
 	ra.sc = sc;
 	ra.freesp = agfl_extents;
-	xfs_bitmap_init(&ra.agmetablocks);
+	xbitmap_init(&ra.agmetablocks);
 
 	/* Find all space used by the free space btrees & rmapbt. */
 	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);

@@ -500,7 +498,7 @@ xrep_agfl_collect_blocks(
 	/* Find all blocks currently being used by the bnobt. */
 	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
 			XFS_BTNUM_BNO);
-	error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
 	if (error)
 		goto err;
 	xfs_btree_del_cursor(cur, error);

@@ -508,7 +506,7 @@ xrep_agfl_collect_blocks(
 	/* Find all blocks currently being used by the cntbt. */
 	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
 			XFS_BTNUM_CNT);
-	error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
 	if (error)
 		goto err;
 

@@ -518,8 +516,8 @@ xrep_agfl_collect_blocks(
 	 * Drop the freesp meta blocks that are in use by btrees.
 	 * The remaining blocks /should/ be AGFL blocks.
 	 */
-	error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
-	xfs_bitmap_destroy(&ra.agmetablocks);
+	error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
+	xbitmap_destroy(&ra.agmetablocks);
 	if (error)
 		return error;
 

@@ -527,18 +525,12 @@ xrep_agfl_collect_blocks(
 	 * Calculate the new AGFL size.  If we found more blocks than fit in
 	 * the AGFL we'll free them later.
 	 */
-	*flcount = 0;
-	for_each_xfs_bitmap_extent(br, n, agfl_extents) {
-		*flcount += br->len;
-		if (*flcount > xfs_agfl_size(mp))
-			break;
-	}
-	if (*flcount > xfs_agfl_size(mp))
-		*flcount = xfs_agfl_size(mp);
+	*flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
+			 xfs_agfl_size(mp));
 	return 0;
 
 err:
-	xfs_bitmap_destroy(&ra.agmetablocks);
+	xbitmap_destroy(&ra.agmetablocks);
 	xfs_btree_del_cursor(cur, error);
 	return error;
 }

@@ -550,7 +542,7 @@ xrep_agfl_update_agf(
 	struct xfs_buf		*agf_bp,
 	xfs_agblock_t		flcount)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agf_bp);
+	struct xfs_agf		*agf = agf_bp->b_addr;
 
 	ASSERT(flcount <= xfs_agfl_size(sc->mp));
 

@@ -573,13 +565,13 @@ STATIC void
 xrep_agfl_init_header(
 	struct xfs_scrub	*sc,
 	struct xfs_buf		*agfl_bp,
-	struct xfs_bitmap	*agfl_extents,
+	struct xbitmap		*agfl_extents,
 	xfs_agblock_t		flcount)
 {
 	struct xfs_mount	*mp = sc->mp;
 	__be32			*agfl_bno;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*br;
+	struct xbitmap_range	*n;
 	struct xfs_agfl		*agfl;
 	xfs_agblock_t		agbno;
 	unsigned int		fl_off;

@@ -602,8 +594,8 @@ xrep_agfl_init_header(
 	 * step.
 	 */
 	fl_off = 0;
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
-	for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+	agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
+	for_each_xbitmap_extent(br, n, agfl_extents) {
 		agbno = XFS_FSB_TO_AGBNO(mp, br->start);
 
 		trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);

@@ -637,7 +629,7 @@ int
 xrep_agfl(
 	struct xfs_scrub	*sc)
 {
-	struct xfs_bitmap	agfl_extents;
+	struct xbitmap		agfl_extents;
 	struct xfs_mount	*mp = sc->mp;
 	struct xfs_buf		*agf_bp;
 	struct xfs_buf		*agfl_bp;

@@ -649,7 +641,7 @@ xrep_agfl(
 		return -EOPNOTSUPP;
 
 	xchk_perag_get(sc->mp, &sc->sa);
-	xfs_bitmap_init(&agfl_extents);
+	xbitmap_init(&agfl_extents);
 
 	/*
 	 * Read the AGF so that we can query the rmapbt.  We hope that there's

@@ -696,10 +688,10 @@ xrep_agfl(
 		goto err;
 
 	/* Dump any AGFL overflow. */
-	return xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
+	error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
 			XFS_AG_RESV_AGFL);
 err:
-	xfs_bitmap_destroy(&agfl_extents);
+	xbitmap_destroy(&agfl_extents);
 	return error;
 }
 

@@ -761,7 +753,7 @@ xrep_agi_init_header(
 	struct xfs_buf		*agi_bp,
 	struct xfs_agi		*old_agi)
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agi_bp);
+	struct xfs_agi		*agi = agi_bp->b_addr;
 	struct xfs_mount	*mp = sc->mp;
 
 	memcpy(old_agi, agi, sizeof(*old_agi));

@@ -807,7 +799,7 @@ xrep_agi_calc_from_btrees(
 	struct xfs_buf		*agi_bp)
 {
 	struct xfs_btree_cur	*cur;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agi_bp);
+	struct xfs_agi		*agi = agi_bp->b_addr;
 	struct xfs_mount	*mp = sc->mp;
 	xfs_agino_t		count;
 	xfs_agino_t		freecount;

@@ -835,7 +827,7 @@ xrep_agi_commit_new(
 	struct xfs_buf		*agi_bp)
 {
 	struct xfs_perag	*pag;
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agi_bp);
+	struct xfs_agi		*agi = agi_bp->b_addr;
 
 	/* Trigger inode count recalculation */
 	xfs_force_summary_recalc(sc->mp);

@@ -892,7 +884,7 @@ xrep_agi(
 	if (error)
 		return error;
 	agi_bp->b_ops = &xfs_agi_buf_ops;
-	agi = XFS_BUF_TO_AGI(agi_bp);
+	agi = agi_bp->b_addr;
 
 	/* Find the AGI btree roots. */
 	error = xrep_agi_find_btrees(sc, fab);
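All of the bc_private.a.agno → bc_ag.agno and bc_private.b.* → bc_ino.* renames in these hunks come from replacing the cursor's bc_private container with an anonymous union, so per-AG and per-inode state is reached without the extra path component. A rough sketch of the resulting layout, keeping only the members these hunks actually touch (everything else is elided):

	struct xfs_btree_cur {
		/* ... common cursor state elided ... */
		union {
			struct {			/* per-AG btree cursors */
				xfs_agnumber_t	agno;
				/* ... */
			} bc_ag;
			struct {			/* inode-rooted btree cursors */
				struct xfs_inode *ip;
				int		whichfork;
				/* ... */
			} bc_ino;
		};
	};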
@@ -94,7 +94,7 @@ xchk_allocbt_rec(
 	union xfs_btree_rec	*rec)
 {
 	struct xfs_mount	*mp = bs->cur->bc_mp;
-	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = bs->cur->bc_ag.agno;
 	xfs_agblock_t		bno;
 	xfs_extlen_t		len;
 
@@ -98,7 +98,7 @@ struct xchk_xattr {
 /*
  * Check that an extended attribute key can be looked up by hash.
  *
- * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
+ * We use the XFS attribute list iterator (i.e. xfs_attr_list_ilocked)
  * to call this function for every attribute key in an inode.  Once
  * we're here, we load the attribute value to see if any errors happen,
  * or if we get more or less data than we expected.

@@ -147,11 +147,8 @@ xchk_xattr_listent(
 		return;
 	}
 
-	args.flags = ATTR_KERNOTIME;
-	if (flags & XFS_ATTR_ROOT)
-		args.flags |= ATTR_ROOT;
-	else if (flags & XFS_ATTR_SECURE)
-		args.flags |= ATTR_SECURE;
+	args.op_flags = XFS_DA_OP_NOTIME;
+	args.attr_filter = flags & XFS_ATTR_NSP_ONDISK_MASK;
 	args.geo = context->dp->i_mount->m_attr_geo;
 	args.whichfork = XFS_ATTR_FORK;
 	args.dp = context->dp;

@@ -162,7 +159,10 @@ xchk_xattr_listent(
 	args.value = xchk_xattr_valuebuf(sx->sc);
 	args.valuelen = valuelen;
 
-	error = xfs_attr_get_ilocked(context->dp, &args);
+	error = xfs_attr_get_ilocked(&args);
+	/* ENODATA means the hash lookup failed and the attr is bad */
+	if (error == -ENODATA)
+		error = -EFSCORRUPTED;
 	if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
 			&error))
 		goto fail_xref;

@@ -474,7 +474,6 @@ xchk_xattr(
 	struct xfs_scrub		*sc)
 {
 	struct xchk_xattr		sx;
-	struct attrlist_cursor_kern	cursor = { 0 };
 	xfs_dablk_t			last_checked = -1U;
 	int				error = 0;
 

@@ -493,11 +492,10 @@ xchk_xattr(
 
 	/* Check that every attr key can also be looked up by hash. */
 	sx.context.dp = sc->ip;
-	sx.context.cursor = &cursor;
 	sx.context.resynch = 1;
 	sx.context.put_listent = xchk_xattr_listent;
 	sx.context.tp = sc->tp;
-	sx.context.flags = ATTR_INCOMPLETE;
+	sx.context.allow_incomplete = true;
 	sx.sc = sc;
 
 	/*

@@ -516,7 +514,7 @@ xchk_xattr(
 	 * iteration, which doesn't really follow the usual buffer
 	 * locking order.
	 */
-	error = xfs_attr_list_int_ilocked(&sx.context);
+	error = xfs_attr_list_ilocked(&sx.context);
 	if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
 		goto out;
 
@@ -18,14 +18,14 @@
  * This is the logical equivalent of bitmap |= mask(start, len).
  */
 int
-xfs_bitmap_set(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set(
+	struct xbitmap		*bitmap,
 	uint64_t		start,
 	uint64_t		len)
 {
-	struct xfs_bitmap_range	*bmr;
+	struct xbitmap_range	*bmr;
 
-	bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
+	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
 	if (!bmr)
 		return -ENOMEM;
 

@@ -39,13 +39,13 @@ xfs_bitmap_set(
 
 /* Free everything related to this bitmap. */
 void
-xfs_bitmap_destroy(
-	struct xfs_bitmap	*bitmap)
+xbitmap_destroy(
+	struct xbitmap		*bitmap)
 {
-	struct xfs_bitmap_range	*bmr;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
 
-	for_each_xfs_bitmap_extent(bmr, n, bitmap) {
+	for_each_xbitmap_extent(bmr, n, bitmap) {
 		list_del(&bmr->list);
 		kmem_free(bmr);
 	}

@@ -53,24 +53,24 @@ xfs_bitmap_destroy(
 
 /* Set up a per-AG block bitmap. */
 void
-xfs_bitmap_init(
-	struct xfs_bitmap	*bitmap)
+xbitmap_init(
+	struct xbitmap		*bitmap)
 {
 	INIT_LIST_HEAD(&bitmap->list);
 }
 
 /* Compare two btree extents. */
 static int
-xfs_bitmap_range_cmp(
+xbitmap_range_cmp(
 	void			*priv,
 	struct list_head	*a,
 	struct list_head	*b)
 {
-	struct xfs_bitmap_range	*ap;
-	struct xfs_bitmap_range	*bp;
+	struct xbitmap_range	*ap;
+	struct xbitmap_range	*bp;
 
-	ap = container_of(a, struct xfs_bitmap_range, list);
-	bp = container_of(b, struct xfs_bitmap_range, list);
+	ap = container_of(a, struct xbitmap_range, list);
+	bp = container_of(b, struct xbitmap_range, list);
 
 	if (ap->start > bp->start)
 		return 1;

@@ -96,14 +96,14 @@ xfs_bitmap_range_cmp(
 #define LEFT_ALIGNED	(1 << 0)
 #define RIGHT_ALIGNED	(1 << 1)
 int
-xfs_bitmap_disunion(
-	struct xfs_bitmap	*bitmap,
-	struct xfs_bitmap	*sub)
+xbitmap_disunion(
+	struct xbitmap		*bitmap,
+	struct xbitmap		*sub)
 {
 	struct list_head	*lp;
-	struct xfs_bitmap_range	*br;
-	struct xfs_bitmap_range	*new_br;
-	struct xfs_bitmap_range	*sub_br;
+	struct xbitmap_range	*br;
+	struct xbitmap_range	*new_br;
+	struct xbitmap_range	*sub_br;
 	uint64_t		sub_start;
 	uint64_t		sub_len;
 	int			state;

@@ -113,8 +113,8 @@ xfs_bitmap_disunion(
 		return 0;
 	ASSERT(!list_empty(&sub->list));
 
-	list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
-	list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
+	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
+	list_sort(NULL, &sub->list, xbitmap_range_cmp);
 
 	/*
 	 * Now that we've sorted both lists, we iterate bitmap once, rolling

@@ -124,11 +124,11 @@ xfs_bitmap_disunion(
 	 * list traversal is similar to merge sort, but we're deleting
 	 * instead.  In this manner we avoid O(n^2) operations.
 	 */
-	sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
+	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
 			list);
 	lp = bitmap->list.next;
 	while (lp != &bitmap->list) {
-		br = list_entry(lp, struct xfs_bitmap_range, list);
+		br = list_entry(lp, struct xbitmap_range, list);
 
 		/*
 		 * Advance sub_br and/or br until we find a pair that

@@ -181,7 +181,7 @@ xfs_bitmap_disunion(
 			 * Deleting from the middle: add the new right extent
 			 * and then shrink the left extent.
 			 */
-			new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
+			new_br = kmem_alloc(sizeof(struct xbitmap_range),
 					KM_MAYFAIL);
 			if (!new_br) {
 				error = -ENOMEM;

@@ -247,8 +247,8 @@ xfs_bitmap_disunion(
  * blocks going from the leaf towards the root.
 */
 int
-xfs_bitmap_set_btcur_path(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set_btcur_path(
+	struct xbitmap		*bitmap,
 	struct xfs_btree_cur	*cur)
 {
 	struct xfs_buf		*bp;

@@ -261,7 +261,7 @@ xfs_bitmap_set_btcur_path(
 		if (!bp)
 			continue;
 		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
-		error = xfs_bitmap_set(bitmap, fsb, 1);
+		error = xbitmap_set(bitmap, fsb, 1);
 		if (error)
 			return error;
 	}

@@ -271,12 +271,12 @@ xfs_bitmap_set_btcur_path(
 
 /* Collect a btree's block in the bitmap. */
 STATIC int
-xfs_bitmap_collect_btblock(
+xbitmap_collect_btblock(
 	struct xfs_btree_cur	*cur,
 	int			level,
 	void			*priv)
 {
-	struct xfs_bitmap	*bitmap = priv;
+	struct xbitmap		*bitmap = priv;
 	struct xfs_buf		*bp;
 	xfs_fsblock_t		fsbno;
 

@@ -285,15 +285,30 @@ xfs_bitmap_collect_btblock(
 		return 0;
 
 	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
-	return xfs_bitmap_set(bitmap, fsbno, 1);
+	return xbitmap_set(bitmap, fsbno, 1);
 }
 
 /* Walk the btree and mark the bitmap wherever a btree block is found. */
 int
-xfs_bitmap_set_btblocks(
-	struct xfs_bitmap	*bitmap,
+xbitmap_set_btblocks(
+	struct xbitmap		*bitmap,
 	struct xfs_btree_cur	*cur)
 {
-	return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock,
+	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
 			XFS_BTREE_VISIT_ALL, bitmap);
 }
+
+/* How many bits are set in this bitmap? */
+uint64_t
+xbitmap_hweight(
+	struct xbitmap		*bitmap)
+{
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
+	uint64_t		ret = 0;
+
+	for_each_xbitmap_extent(bmr, n, bitmap)
+		ret += bmr->len;
+
+	return ret;
+}
@@ -6,31 +6,32 @@
 #ifndef __XFS_SCRUB_BITMAP_H__
 #define __XFS_SCRUB_BITMAP_H__
 
-struct xfs_bitmap_range {
+struct xbitmap_range {
 	struct list_head	list;
 	uint64_t		start;
 	uint64_t		len;
 };
 
-struct xfs_bitmap {
+struct xbitmap {
 	struct list_head	list;
 };
 
-void xfs_bitmap_init(struct xfs_bitmap *bitmap);
-void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
+void xbitmap_init(struct xbitmap *bitmap);
+void xbitmap_destroy(struct xbitmap *bitmap);
 
-#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
+#define for_each_xbitmap_extent(bex, n, bitmap) \
 	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
 
-#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
+#define for_each_xbitmap_block(b, bex, n, bitmap) \
 	list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
-		for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
+		for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
 
-int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
-int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
-int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
+int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
+int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
+int xbitmap_set_btcur_path(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
-int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
+int xbitmap_set_btblocks(struct xbitmap *bitmap,
 		struct xfs_btree_cur *cur);
+uint64_t xbitmap_hweight(struct xbitmap *bitmap);
 
 #endif /* __XFS_SCRUB_BITMAP_H__ */
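The renamed bitmap API above is small enough to show end to end. Here is a condensed lifecycle sketch in the style of the AGFL repair caller — the function name is illustrative, error handling is abbreviated, and only routines declared in this header are used:

	STATIC int
	xrep_count_leftover_blocks(	/* illustrative name, not in the patch */
		struct xbitmap		*sub,
		uint64_t		start,
		uint64_t		len,
		uint64_t		*nr)
	{
		struct xbitmap		bitmap;
		int			error;

		xbitmap_init(&bitmap);
		error = xbitmap_set(&bitmap, start, len);
		if (!error)
			error = xbitmap_disunion(&bitmap, sub); /* bitmap &= ~sub */
		if (!error)
			*nr = xbitmap_hweight(&bitmap);		/* bits still set */
		xbitmap_destroy(&bitmap);
		return error;
	}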
@@ -374,7 +374,7 @@ xchk_bmapbt_rec(
 	struct xfs_bmbt_irec	iext_irec;
 	struct xfs_iext_cursor	icur;
 	struct xchk_bmap_info	*info = bs->private;
-	struct xfs_inode	*ip = bs->cur->bc_private.b.ip;
+	struct xfs_inode	*ip = bs->cur->bc_ino.ip;
 	struct xfs_buf		*bp = NULL;
 	struct xfs_btree_block	*block;
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, info->whichfork);

@@ -501,7 +501,7 @@ xchk_bmap_check_rmap(
 		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
 				rec->rm_offset);
 	if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
-			cur->bc_private.a.agno, rec->rm_startblock))
+			cur->bc_ag.agno, rec->rm_startblock))
 		xchk_fblock_set_corrupt(sc, sbcri->whichfork,
 				rec->rm_offset);
 	if (irec.br_blockcount > rec->rm_blockcount)
@@ -219,19 +219,21 @@ xchk_da_btree_block_check_sibling(
 	int			direction,
 	xfs_dablk_t		sibling)
 {
+	struct xfs_da_state_path *path = &ds->state->path;
+	struct xfs_da_state_path *altpath = &ds->state->altpath;
 	int			retval;
+	int			plevel;
 	int			error;
 
-	memcpy(&ds->state->altpath, &ds->state->path,
-			sizeof(ds->state->altpath));
+	memcpy(altpath, path, sizeof(ds->state->altpath));
 
 	/*
 	 * If the pointer is null, we shouldn't be able to move the upper
 	 * level pointer anywhere.
 	 */
 	if (sibling == 0) {
-		error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
-				direction, false, &retval);
+		error = xfs_da3_path_shift(ds->state, altpath, direction,
+				false, &retval);
 		if (error == 0 && retval == 0)
 			xchk_da_set_corrupt(ds, level);
 		error = 0;

@@ -239,27 +241,33 @@ xchk_da_btree_block_check_sibling(
 	}
 
 	/* Move the alternate cursor one block in the direction given. */
-	error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
-			direction, false, &retval);
+	error = xfs_da3_path_shift(ds->state, altpath, direction, false,
+			&retval);
 	if (!xchk_da_process_error(ds, level, &error))
-		return error;
+		goto out;
 	if (retval) {
 		xchk_da_set_corrupt(ds, level);
-		return error;
+		goto out;
 	}
-	if (ds->state->altpath.blk[level].bp)
-		xchk_buffer_recheck(ds->sc,
-				ds->state->altpath.blk[level].bp);
+	if (altpath->blk[level].bp)
+		xchk_buffer_recheck(ds->sc, altpath->blk[level].bp);
 
 	/* Compare upper level pointer to sibling pointer. */
-	if (ds->state->altpath.blk[level].blkno != sibling)
+	if (altpath->blk[level].blkno != sibling)
 		xchk_da_set_corrupt(ds, level);
-	if (ds->state->altpath.blk[level].bp) {
-		xfs_trans_brelse(ds->dargs.trans,
-				ds->state->altpath.blk[level].bp);
-		ds->state->altpath.blk[level].bp = NULL;
-	}
+
+out:
+	/* Free all buffers in the altpath that aren't referenced from path. */
+	for (plevel = 0; plevel < altpath->active; plevel++) {
+		if (altpath->blk[plevel].bp == NULL ||
+		    (plevel < path->active &&
+		     altpath->blk[plevel].bp == path->blk[plevel].bp))
+			continue;
+
+		xfs_trans_brelse(ds->dargs.trans, altpath->blk[plevel].bp);
+		altpath->blk[plevel].bp = NULL;
+	}
 
 	return error;
 }
@@ -155,6 +155,9 @@ xchk_dir_actor(
 	xname.type = XFS_DIR3_FT_UNKNOWN;
 
 	error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
+	/* ENOENT means the hash lookup failed and the dir is corrupt */
+	if (error == -ENOENT)
+		error = -EFSCORRUPTED;
 	if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
 			&error))
 		goto out;

@@ -500,7 +503,7 @@ xchk_directory_leaf1_bestfree(
 	/* Read the free space block. */
 	error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp);
 	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
-		goto out;
+		return error;
 	xchk_buffer_recheck(sc, bp);
 
 	leaf = bp->b_addr;

@@ -565,9 +568,10 @@ xchk_directory_leaf1_bestfree(
 		xchk_directory_check_freesp(sc, lblk, dbp, best);
 		xfs_trans_brelse(sc->tp, dbp);
 		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
-			goto out;
+			break;
 	}
+out:
 	xfs_trans_brelse(sc->tp, bp);
 	return error;
 }
 

@@ -589,7 +593,7 @@ xchk_directory_free_bestfree(
 	/* Read the free space block */
 	error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
 	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
-		goto out;
+		return error;
 	xchk_buffer_recheck(sc, bp);
 
 	if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {

@@ -612,7 +616,7 @@ xchk_directory_free_bestfree(
 				0, &dbp);
 		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
 				&error))
-			break;
+			goto out;
 		xchk_directory_check_freesp(sc, lblk, dbp, best);
 		xfs_trans_brelse(sc->tp, dbp);
 	}

@@ -620,6 +624,7 @@ xchk_directory_free_bestfree(
 
 	if (freehdr.nused + stale != freehdr.nvalid)
 		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+out:
 	xfs_trans_brelse(sc->tp, bp);
 	return error;
 }
@@ -104,7 +104,7 @@ xchk_iallocbt_chunk(
 	xfs_extlen_t			len)
 {
 	struct xfs_mount		*mp = bs->cur->bc_mp;
-	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
+	xfs_agnumber_t			agno = bs->cur->bc_ag.agno;
 	xfs_agblock_t			bno;
 
 	bno = XFS_AGINO_TO_AGBNO(mp, agino);

@@ -164,7 +164,7 @@ xchk_iallocbt_check_cluster_ifree(
 	 * the record, compute which fs inode we're talking about.
 	 */
 	agino = irec->ir_startino + irec_ino;
-	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
+	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.agno, agino);
 	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));
 
 	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||

@@ -215,7 +215,7 @@ xchk_iallocbt_check_cluster(
 	struct xfs_dinode		*dip;
 	struct xfs_buf			*cluster_bp;
 	unsigned int			nr_inodes;
-	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
+	xfs_agnumber_t			agno = bs->cur->bc_ag.agno;
 	xfs_agblock_t			agbno;
 	unsigned int			cluster_index;
 	uint16_t			cluster_mask = 0;

@@ -426,7 +426,7 @@ xchk_iallocbt_rec(
 	struct xchk_iallocbt		*iabt = bs->private;
 	struct xfs_inobt_rec_incore	irec;
 	uint64_t			holes;
-	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
+	xfs_agnumber_t			agno = bs->cur->bc_ag.agno;
 	xfs_agino_t			agino;
 	xfs_extlen_t			len;
 	int				holecount;
@@ -336,7 +336,7 @@ xchk_refcountbt_rec(
 {
 	struct xfs_mount	*mp = bs->cur->bc_mp;
 	xfs_agblock_t		*cow_blocks = bs->private;
-	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = bs->cur->bc_ag.agno;
 	xfs_agblock_t		bno;
 	xfs_extlen_t		len;
 	xfs_nlink_t		refcount;
@@ -208,8 +208,10 @@ xrep_calc_ag_resblks(
 	/* Now grab the block counters from the AGF. */
 	error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
 	if (!error) {
-		aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
-		freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
+		struct xfs_agf	*agf = bp->b_addr;
+
+		aglen = be32_to_cpu(agf->agf_length);
+		freelen = be32_to_cpu(agf->agf_freeblks);
 		usedlen = aglen - freelen;
 		xfs_buf_relse(bp);
 	}

@@ -434,10 +436,10 @@ xrep_init_btblock(
 int
 xrep_invalidate_blocks(
 	struct xfs_scrub	*sc,
-	struct xfs_bitmap	*bitmap)
+	struct xbitmap		*bitmap)
 {
-	struct xfs_bitmap_range	*bmr;
-	struct xfs_bitmap_range	*n;
+	struct xbitmap_range	*bmr;
+	struct xbitmap_range	*n;
 	struct xfs_buf		*bp;
 	xfs_fsblock_t		fsbno;
 

@@ -449,7 +451,7 @@ xrep_invalidate_blocks(
 	 * because we never own those; and if we can't TRYLOCK the buffer we
 	 * assume it's owned by someone else.
	 */
-	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
 		/* Skip AG headers and post-EOFS blocks */
 		if (!xfs_verify_fsbno(sc->mp, fsbno))
 			continue;

@@ -595,18 +597,18 @@ xrep_reap_block(
 int
 xrep_reap_extents(
 	struct xfs_scrub		*sc,
-	struct xfs_bitmap		*bitmap,
+	struct xbitmap			*bitmap,
 	const struct xfs_owner_info	*oinfo,
 	enum xfs_ag_resv_type		type)
 {
-	struct xfs_bitmap_range		*bmr;
-	struct xfs_bitmap_range		*n;
+	struct xbitmap_range		*bmr;
+	struct xbitmap_range		*n;
 	xfs_fsblock_t			fsbno;
 	int				error = 0;
 
 	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
 
-	for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
 		ASSERT(sc->ip != NULL ||
 		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
 		trace_xrep_dispose_btree_extent(sc->mp,

@@ -615,11 +617,9 @@ xrep_reap_extents(
 
 		error = xrep_reap_block(sc, fsbno, oinfo, type);
 		if (error)
-			goto out;
+			break;
 	}
 
-out:
-	xfs_bitmap_destroy(bitmap);
 	return error;
 }
 

@@ -879,7 +879,7 @@ xrep_find_ag_btree_roots(
 
 	ri.sc = sc;
 	ri.btree_info = btree_info;
-	ri.agf = XFS_BUF_TO_AGF(agf_bp);
+	ri.agf = agf_bp->b_addr;
 	ri.agfl_bp = agfl_bp;
 	for (fab = btree_info; fab->buf_ops; fab++) {
 		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
@@ -28,11 +28,11 @@ int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
 		struct xfs_buf **bpp, xfs_btnum_t btnum,
 		const struct xfs_buf_ops *ops);
 
-struct xfs_bitmap;
+struct xbitmap;
 
 int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
-int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xfs_bitmap *btlist);
-int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
+int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist);
+int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist,
 		const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
 
 struct xrep_find_ag_btree {
@@ -92,7 +92,7 @@ xchk_rmapbt_rec(
 {
 	struct xfs_mount	*mp = bs->cur->bc_mp;
 	struct xfs_rmap_irec	irec;
-	xfs_agnumber_t		agno = bs->cur->bc_private.a.agno;
+	xfs_agnumber_t		agno = bs->cur->bc_ag.agno;
 	bool			non_inode;
 	bool			is_unwritten;
 	bool			is_bmbt;
@@ -168,6 +168,7 @@ xchk_teardown(
 			xfs_irele(sc->ip);
 		sc->ip = NULL;
 	}
+	sb_end_write(sc->mp->m_super);
 	if (sc->flags & XCHK_REAPING_DISABLED)
 		xchk_start_reaping(sc);
 	if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) {

@@ -490,6 +491,14 @@ xfs_scrub_metadata(
 	sc.ops = &meta_scrub_ops[sm->sm_type];
 	sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type);
 retry_op:
+	/*
+	 * If freeze runs concurrently with a scrub, the freeze can be delayed
+	 * indefinitely as we walk the filesystem and iterate over metadata
+	 * buffers.  Freeze quiesces the log (which waits for the buffer LRU to
+	 * be emptied) and that won't happen while checking is running.
+	 */
+	sb_start_write(mp->m_super);
+
 	/* Set up for the operation. */
 	error = sc.ops->setup(&sc, ip);
 	if (error)
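The two hunks above work as a pair: xfs_scrub_metadata() now takes a superblock write reference before every setup attempt, and xchk_teardown() drops it, so a concurrent freeze simply waits for the scrubber instead of hanging on the buffer LRU. A skeleton of the pairing (labels and ellipses stand in for the surrounding function body; this is a sketch, not additional code):

	retry_op:
		/* in xfs_scrub_metadata(): block freeze for the whole op */
		sb_start_write(mp->m_super);

		error = sc.ops->setup(&sc, ip);
		if (error)
			goto out_teardown;	/* illustrative label */

		/* ... run the check, possibly repair ... */

	out_teardown:
		/* xchk_teardown() calls sb_end_write(sc->mp->m_super) */
		error = xchk_teardown(&sc, ip, error);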
@@ -24,9 +24,9 @@ xchk_btree_cur_fsbno(
 		return XFS_DADDR_TO_FSB(cur->bc_mp, cur->bc_bufs[level]->b_bn);
 	else if (level == cur->bc_nlevels - 1 &&
 		 cur->bc_flags & XFS_BTREE_LONG_PTRS)
-		return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_private.b.ip->i_ino);
+		return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
 	else if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS))
-		return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno, 0);
+		return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno, 0);
 	return NULLFSBLOCK;
 }
 
@@ -379,7 +379,7 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
 		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
 		__entry->dev = sc->mp->m_super->s_dev;
 		__entry->ino = sc->ip->i_ino;
-		__entry->whichfork = cur->bc_private.b.whichfork;
+		__entry->whichfork = cur->bc_ino.whichfork;
 		__entry->type = sc->sm->sm_type;
 		__entry->btnum = cur->bc_btnum;
 		__entry->level = level;

@@ -459,7 +459,7 @@ TRACE_EVENT(xchk_ifork_btree_error,
 		xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
 		__entry->dev = sc->mp->m_super->s_dev;
 		__entry->ino = sc->ip->i_ino;
-		__entry->whichfork = cur->bc_private.b.whichfork;
+		__entry->whichfork = cur->bc_ino.whichfork;
 		__entry->type = sc->sm->sm_type;
 		__entry->btnum = cur->bc_btnum;
 		__entry->level = level;
fs/xfs/xfs_acl.c (132 lines changed)

@@ -14,6 +14,8 @@
 #include "xfs_trace.h"
 #include "xfs_error.h"
 #include "xfs_acl.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
 
 #include <linux/posix_acl_xattr.h>
 

@@ -67,10 +69,12 @@ xfs_acl_from_disk(
 
 		switch (acl_e->e_tag) {
 		case ACL_USER:
-			acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
+			acl_e->e_uid = make_kuid(&init_user_ns,
+						 be32_to_cpu(ace->ae_id));
 			break;
 		case ACL_GROUP:
-			acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
+			acl_e->e_gid = make_kgid(&init_user_ns,
+						 be32_to_cpu(ace->ae_id));
 			break;
 		case ACL_USER_OBJ:
 		case ACL_GROUP_OBJ:

@@ -103,10 +107,12 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
 		ace->ae_tag = cpu_to_be32(acl_e->e_tag);
 		switch (acl_e->e_tag) {
 		case ACL_USER:
-			ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid));
+			ace->ae_id = cpu_to_be32(
+					from_kuid(&init_user_ns, acl_e->e_uid));
 			break;
 		case ACL_GROUP:
-			ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid));
+			ace->ae_id = cpu_to_be32(
+					from_kgid(&init_user_ns, acl_e->e_gid));
 			break;
 		default:
 			ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);

@@ -120,102 +126,86 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
 struct posix_acl *
 xfs_get_acl(struct inode *inode, int type)
 {
-	struct xfs_inode *ip = XFS_I(inode);
-	struct posix_acl *acl = NULL;
-	struct xfs_acl *xfs_acl = NULL;
-	unsigned char *ea_name;
-	int error;
-	int len;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct posix_acl	*acl = NULL;
+	struct xfs_da_args	args = {
+		.dp		= ip,
+		.attr_filter	= XFS_ATTR_ROOT,
+		.valuelen	= XFS_ACL_MAX_SIZE(mp),
+	};
+	int			error;
 
 	trace_xfs_get_acl(ip);
 
 	switch (type) {
 	case ACL_TYPE_ACCESS:
-		ea_name = SGI_ACL_FILE;
+		args.name = SGI_ACL_FILE;
 		break;
 	case ACL_TYPE_DEFAULT:
-		ea_name = SGI_ACL_DEFAULT;
+		args.name = SGI_ACL_DEFAULT;
 		break;
 	default:
 		BUG();
 	}
+	args.namelen = strlen(args.name);
 
 	/*
-	 * If we have a cached ACLs value just return it, not need to
-	 * go out to the disk.
+	 * If the attribute doesn't exist make sure we have a negative cache
+	 * entry, for any other error assume it is transient.
	 */
-	len = XFS_ACL_MAX_SIZE(ip->i_mount);
-	error = xfs_attr_get(ip, ea_name, strlen(ea_name),
-				(unsigned char **)&xfs_acl, &len,
-				ATTR_ALLOC | ATTR_ROOT);
-	if (error) {
-		/*
-		 * If the attribute doesn't exist make sure we have a negative
-		 * cache entry, for any other error assume it is transient.
-		 */
-		if (error != -ENOATTR)
-			acl = ERR_PTR(error);
-	} else {
-		acl = xfs_acl_from_disk(ip->i_mount, xfs_acl, len,
-					XFS_ACL_MAX_ENTRIES(ip->i_mount));
-		kmem_free(xfs_acl);
+	error = xfs_attr_get(&args);
+	if (!error) {
+		acl = xfs_acl_from_disk(mp, args.value, args.valuelen,
+					XFS_ACL_MAX_ENTRIES(mp));
+	} else if (error != -ENOATTR) {
+		acl = ERR_PTR(error);
 	}
+
+	kmem_free(args.value);
 	return acl;
 }
 
 int
 __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
-	struct xfs_inode *ip = XFS_I(inode);
-	unsigned char *ea_name;
-	int error;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_da_args	args = {
+		.dp		= ip,
+		.attr_filter	= XFS_ATTR_ROOT,
+	};
+	int			error;
 
 	switch (type) {
 	case ACL_TYPE_ACCESS:
-		ea_name = SGI_ACL_FILE;
+		args.name = SGI_ACL_FILE;
 		break;
 	case ACL_TYPE_DEFAULT:
 		if (!S_ISDIR(inode->i_mode))
 			return acl ? -EACCES : 0;
-		ea_name = SGI_ACL_DEFAULT;
+		args.name = SGI_ACL_DEFAULT;
 		break;
 	default:
 		return -EINVAL;
 	}
+	args.namelen = strlen(args.name);
 
 	if (acl) {
-		struct xfs_acl *xfs_acl;
-		int len = XFS_ACL_MAX_SIZE(ip->i_mount);
-
-		xfs_acl = kmem_zalloc_large(len, 0);
-		if (!xfs_acl)
+		args.valuelen = XFS_ACL_SIZE(acl->a_count);
+		args.value = kmem_zalloc_large(args.valuelen, 0);
+		if (!args.value)
 			return -ENOMEM;
-
-		xfs_acl_to_disk(xfs_acl, acl);
-
-		/* subtract away the unused acl entries */
-		len -= sizeof(struct xfs_acl_entry) *
-			(XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
-
-		error = xfs_attr_set(ip, ea_name, strlen(ea_name),
-				     (unsigned char *)xfs_acl, len, ATTR_ROOT);
-
-		kmem_free(xfs_acl);
-	} else {
-		/*
-		 * A NULL ACL argument means we want to remove the ACL.
-		 */
-		error = xfs_attr_remove(ip, ea_name,
-					strlen(ea_name),
-					ATTR_ROOT);
-
-		/*
-		 * If the attribute didn't exist to start with that's fine.
-		 */
-		if (error == -ENOATTR)
-			error = 0;
+		xfs_acl_to_disk(args.value, acl);
 	}
 
+	error = xfs_attr_set(&args);
+	kmem_free(args.value);
+
+	/*
+	 * If the attribute didn't exist to start with that's fine.
	 */
+	if (!acl && error == -ENOATTR)
+		error = 0;
 	if (!error)
 		set_cached_acl(inode, type, acl);
 	return error;

@@ -275,3 +265,19 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 
 	return error;
 }
+
+/*
+ * Invalidate any cached ACLs if the user has bypassed the ACL interface.
+ * We don't validate the content whatsoever so it is caller responsibility to
+ * provide data in valid format and ensure i_mode is consistent.
+ */
+void
+xfs_forget_acl(
+	struct inode		*inode,
+	const char		*name)
+{
+	if (!strcmp(name, SGI_ACL_FILE))
+		forget_cached_acl(inode, ACL_TYPE_ACCESS);
+	else if (!strcmp(name, SGI_ACL_DEFAULT))
+		forget_cached_acl(inode, ACL_TYPE_DEFAULT);
+}
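The rewritten ACL helpers show off the cleaned-up xattr calling convention: instead of a long positional argument list, a caller fills a struct xfs_da_args and hands it to xfs_attr_get()/xfs_attr_set(), and on the set side a NULL args.value means remove. A condensed get-side sketch using only the fields the function above sets (unset fields keep their zero defaults; the allocate-on-demand behavior when args.value is NULL is inferred from the kmem_free(args.value) in the caller):

	struct xfs_da_args	args = {
		.dp		= ip,
		.attr_filter	= XFS_ATTR_ROOT,	/* trusted namespace only */
		.name		= SGI_ACL_FILE,
		.valuelen	= XFS_ACL_MAX_SIZE(mp),
	};
	int			error;

	args.namelen = strlen(args.name);
	error = xfs_attr_get(&args);
	if (!error) {
		/* args.value / args.valuelen describe the attribute value */
	}
	kmem_free(args.value);	/* safe even if the lookup failed */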
@@ -13,14 +13,16 @@ struct posix_acl;
 extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
 extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+void xfs_forget_acl(struct inode *inode, const char *name);
 #else
 static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
 {
 	return NULL;
 }
 # define xfs_set_acl					NULL
+static inline void xfs_forget_acl(struct inode *inode, const char *name)
+{
+}
 #endif /* CONFIG_XFS_POSIX_ACL */
 
-extern void xfs_forget_acl(struct inode *inode, const char *name, int xflags);
-
 #endif /* __XFS_ACL_H__ */
@@ -539,7 +539,7 @@ xfs_discard_page(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		goto out_invalidate;
 
-	xfs_alert(mp,
+	xfs_alert_ratelimited(mp,
 		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
 			page, ip->i_ino, offset);
 
@@ -145,8 +145,8 @@ xfs_attr3_node_inactive(
 	 * Since this code is recursive (gasp!) we must protect ourselves.
	 */
 	if (level > XFS_DA_NODE_MAXDEPTH) {
+		xfs_buf_mark_corrupt(bp);
 		xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
-		xfs_buf_corruption_error(bp);
 		return -EFSCORRUPTED;
 	}
 

@@ -194,7 +194,7 @@ xfs_attr3_node_inactive(
 			error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
 			break;
 		default:
-			xfs_buf_corruption_error(child_bp);
+			xfs_buf_mark_corrupt(child_bp);
 			xfs_trans_brelse(*trans, child_bp);
 			error = -EFSCORRUPTED;
 			break;

@@ -289,7 +289,7 @@ xfs_attr3_root_inactive(
 		break;
 	default:
 		error = -EFSCORRUPTED;
-		xfs_buf_corruption_error(bp);
+		xfs_buf_mark_corrupt(bp);
 		xfs_trans_brelse(*trans, bp);
 		break;
 	}
@@ -52,24 +52,19 @@ static int
 xfs_attr_shortform_list(
 	struct xfs_attr_list_context	*context)
 {
-	struct attrlist_cursor_kern	*cursor;
+	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
+	struct xfs_inode		*dp = context->dp;
 	struct xfs_attr_sf_sort		*sbuf, *sbp;
 	struct xfs_attr_shortform	*sf;
 	struct xfs_attr_sf_entry	*sfe;
-	struct xfs_inode		*dp;
 	int				sbsize, nsbuf, count, i;
 	int				error = 0;
 
-	ASSERT(context != NULL);
-	dp = context->dp;
-	ASSERT(dp != NULL);
 	ASSERT(dp->i_afp != NULL);
 	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
 	ASSERT(sf != NULL);
 	if (!sf->hdr.count)
 		return 0;
-	cursor = context->cursor;
-	ASSERT(cursor != NULL);
 
 	trace_xfs_attr_list_sf(context);
 

@@ -205,7 +200,7 @@ xfs_attr_shortform_list(
 STATIC int
 xfs_attr_node_list_lookup(
 	struct xfs_attr_list_context	*context,
-	struct attrlist_cursor_kern	*cursor,
+	struct xfs_attrlist_cursor_kern	*cursor,
 	struct xfs_buf			**pbp)
 {
 	struct xfs_da3_icnode_hdr	nodehdr;

@@ -279,7 +274,7 @@ xfs_attr_node_list_lookup(
 	return 0;
 
 out_corruptbuf:
-	xfs_buf_corruption_error(bp);
+	xfs_buf_mark_corrupt(bp);
 	xfs_trans_brelse(tp, bp);
 	return -EFSCORRUPTED;
 }

@@ -288,8 +283,8 @@ STATIC int
 xfs_attr_node_list(
 	struct xfs_attr_list_context	*context)
 {
+	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
 	struct xfs_attr3_icleaf_hdr	leafhdr;
-	struct attrlist_cursor_kern	*cursor;
 	struct xfs_attr_leafblock	*leaf;
 	struct xfs_da_intnode		*node;
 	struct xfs_buf			*bp;

@@ -299,7 +294,6 @@ xfs_attr_node_list(
 
 	trace_xfs_attr_node_list(context);
 
-	cursor = context->cursor;
 	cursor->initted = 1;
 
 	/*

@@ -394,7 +388,7 @@ xfs_attr3_leaf_list_int(
 	struct xfs_buf			*bp,
 	struct xfs_attr_list_context	*context)
 {
-	struct attrlist_cursor_kern	*cursor;
+	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
 	struct xfs_attr_leafblock	*leaf;
 	struct xfs_attr3_icleaf_hdr	ichdr;
 	struct xfs_attr_leaf_entry	*entries;

@@ -408,7 +402,6 @@ xfs_attr3_leaf_list_int(
 	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
 	entries = xfs_attr3_leaf_entryp(leaf);
 
-	cursor = context->cursor;
 	cursor->initted = 1;
 
 	/*

@@ -452,8 +445,8 @@ xfs_attr3_leaf_list_int(
 		}
 
 		if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
-		    !(context->flags & ATTR_INCOMPLETE))
-			continue;		/* skip incomplete entries */
+		    !context->allow_incomplete)
+			continue;
 
 		if (entry->flags & XFS_ATTR_LOCAL) {
 			xfs_attr_leaf_name_local_t *name_loc;

@@ -488,14 +481,15 @@ xfs_attr3_leaf_list_int(
  * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
 STATIC int
-xfs_attr_leaf_list(xfs_attr_list_context_t *context)
+xfs_attr_leaf_list(
+	struct xfs_attr_list_context	*context)
 {
-	int				error;
-	struct xfs_buf			*bp;
+	struct xfs_buf			*bp;
+	int				error;
 
 	trace_xfs_attr_leaf_list(context);
 
-	context->cursor->blkno = 0;
+	context->cursor.blkno = 0;
 	error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp);
 	if (error)
 		return error;

@@ -506,7 +500,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
 }
 
 int
-xfs_attr_list_int_ilocked(
+xfs_attr_list_ilocked(
 	struct xfs_attr_list_context	*context)
 {
 	struct xfs_inode		*dp = context->dp;

@@ -526,12 +520,12 @@ xfs_attr_list_int_ilocked(
 }
 
 int
-xfs_attr_list_int(
-	xfs_attr_list_context_t	*context)
+xfs_attr_list(
+	struct xfs_attr_list_context	*context)
 {
-	int			error;
-	xfs_inode_t		*dp = context->dp;
-	uint			lock_mode;
+	struct xfs_inode	*dp = context->dp;
+	uint			lock_mode;
+	int			error;
 
 	XFS_STATS_INC(dp->i_mount, xs_attr_list);
 

@@ -539,130 +533,7 @@ xfs_attr_list_int(
 		return -EIO;
 
 	lock_mode = xfs_ilock_attr_map_shared(dp);
-	error = xfs_attr_list_int_ilocked(context);
+	error = xfs_attr_list_ilocked(context);
 	xfs_iunlock(dp, lock_mode);
 	return error;
 }
-
-#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
-	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
-#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
-	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \
-	 & ~(sizeof(uint32_t)-1))
-
-/*
- * Format an attribute and copy it out to the user's buffer.
- * Take care to check values and protect against them changing later,
- * we may be reading them directly out of a user buffer.
- */
-STATIC void
-xfs_attr_put_listent(
-	xfs_attr_list_context_t *context,
-	int		flags,
-	unsigned char	*name,
-	int		namelen,
-	int		valuelen)
-{
-	struct attrlist *alist = (struct attrlist *)context->alist;
-	attrlist_ent_t *aep;
-	int arraytop;
-
-	ASSERT(!context->seen_enough);
-	ASSERT(!(context->flags & ATTR_KERNOVAL));
-	ASSERT(context->count >= 0);
-	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
-	ASSERT(context->firstu >= sizeof(*alist));
-	ASSERT(context->firstu <= context->bufsize);
-
-	/*
-	 * Only list entries in the right namespace.
-	 */
-	if (((context->flags & ATTR_SECURE) == 0) !=
-	    ((flags & XFS_ATTR_SECURE) == 0))
-		return;
-	if (((context->flags & ATTR_ROOT) == 0) !=
-	    ((flags & XFS_ATTR_ROOT) == 0))
-		return;
-
-	arraytop = sizeof(*alist) +
-			context->count * sizeof(alist->al_offset[0]);
-	context->firstu -= ATTR_ENTSIZE(namelen);
-	if (context->firstu < arraytop) {
-		trace_xfs_attr_list_full(context);
-		alist->al_more = 1;
-		context->seen_enough = 1;
-		return;
-	}
-
-	aep = (attrlist_ent_t *)&context->alist[context->firstu];
-	aep->a_valuelen = valuelen;
-	memcpy(aep->a_name, name, namelen);
-	aep->a_name[namelen] = 0;
-	alist->al_offset[context->count++] = context->firstu;
-	alist->al_count = context->count;
-	trace_xfs_attr_list_add(context);
-	return;
-}
-
-/*
- * Generate a list of extended attribute names and optionally
- * also value lengths.  Positive return value follows the XFS
- * convention of being an error, zero or negative return code
- * is the length of the buffer returned (negated), indicating
- * success.
- */
-int
-xfs_attr_list(
-	xfs_inode_t	*dp,
-	char		*buffer,
-	int		bufsize,
-	int		flags,
-	attrlist_cursor_kern_t	*cursor)
-{
-	xfs_attr_list_context_t	context;
-	struct attrlist		*alist;
-	int			error;
-
-	/*
-	 * Validate the cursor.
-	 */
-	if (cursor->pad1 || cursor->pad2)
-		return -EINVAL;
-	if ((cursor->initted == 0) &&
-	    (cursor->hashval || cursor->blkno || cursor->offset))
-		return -EINVAL;
-
-	/* Only internal consumers can retrieve incomplete attrs. */
-	if (flags & ATTR_INCOMPLETE)
-		return -EINVAL;
-
-	/*
-	 * Check for a properly aligned buffer.
-	 */
-	if (((long)buffer) & (sizeof(int)-1))
-		return -EFAULT;
-	if (flags & ATTR_KERNOVAL)
-		bufsize = 0;
-
-	/*
-	 * Initialize the output buffer.
-	 */
-	memset(&context, 0, sizeof(context));
-	context.dp = dp;
-	context.cursor = cursor;
-	context.resynch = 1;
-	context.flags = flags;
-	context.alist = buffer;
-	context.bufsize = (bufsize & ~(sizeof(int)-1));	/* align */
-	context.firstu = context.bufsize;
-	context.put_listent = xfs_attr_put_listent;
-
-	alist = (struct attrlist *)context.alist;
-	alist->al_count = 0;
-	alist->al_more = 0;
-	alist->al_offset[0] = context.bufsize;
-
-	error = xfs_attr_list_int(&context);
-	ASSERT(error <= 0);
-	return error;
-}
@@ -1062,7 +1062,6 @@ xfs_collapse_file_space(
 	int			error;
 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
-	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 	bool			done = false;
 
 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

@@ -1078,32 +1077,34 @@ xfs_collapse_file_space(
 	if (error)
 		return error;
 
-	while (!error && !done) {
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
-					&tp);
-		if (error)
-			break;
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
+	if (error)
+		return error;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
 
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
-				ip->i_gdquot, ip->i_pdquot, resblks, 0,
-				XFS_QMOPT_RES_REGBLKS);
-		if (error)
-			goto out_trans_cancel;
-		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+	while (!done) {
 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
 				&done);
 		if (error)
 			goto out_trans_cancel;
+		if (done)
+			break;
 
-		error = xfs_trans_commit(tp);
+		/* finish any deferred frees and roll the transaction */
+		error = xfs_defer_finish(&tp);
+		if (error)
+			goto out_trans_cancel;
 	}
 
+	error = xfs_trans_commit(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return error;
 
 out_trans_cancel:
 	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return error;
 }

@@ -1146,35 +1147,41 @@ xfs_insert_file_space(
 	if (error)
 		return error;
 
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+	if (error)
+		return error;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
 	/*
 	 * The extent shifting code works on extent granularity. So, if stop_fsb
 	 * is not the starting block of extent, we need to split the extent at
 	 * stop_fsb.
	 */
-	error = xfs_bmap_split_extent(ip, stop_fsb);
+	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
 	if (error)
-		return error;
+		goto out_trans_cancel;
 
-	while (!error && !done) {
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
-					&tp);
+	do {
+		error = xfs_trans_roll_inode(&tp, ip);
 		if (error)
-			break;
+			goto out_trans_cancel;
 
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
 				&done, stop_fsb);
 		if (error)
 			goto out_trans_cancel;
+	} while (!done);
 
-		error = xfs_trans_commit(tp);
-	}
-
+	error = xfs_trans_commit(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return error;
 
 out_trans_cancel:
 	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	return error;
 }

@@ -1442,12 +1449,12 @@ xfs_swap_extent_forks(
 	 * event of a crash. Set the owner change log flags now and leave the
 	 * bmbt scan as the last step.
	 */
-	if (ip->i_d.di_version == 3 &&
-	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
-		(*target_log_flags) |= XFS_ILOG_DOWNER;
-	if (tip->i_d.di_version == 3 &&
-	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
-		(*src_log_flags) |= XFS_ILOG_DOWNER;
+	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+		if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+			(*target_log_flags) |= XFS_ILOG_DOWNER;
+		if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+			(*src_log_flags) |= XFS_ILOG_DOWNER;
+	}
 
 	/*
 	 * Swap the data forks of the inodes

@@ -1482,7 +1489,7 @@ xfs_swap_extent_forks(
 		(*src_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
-		ASSERT(ip->i_d.di_version < 3 ||
+		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
 		       (*src_log_flags & XFS_ILOG_DOWNER));
 		(*src_log_flags) |= XFS_ILOG_DBROOT;
 		break;

@@ -1494,7 +1501,7 @@ xfs_swap_extent_forks(
 		break;
 	case XFS_DINODE_FMT_BTREE:
 		(*target_log_flags) |= XFS_ILOG_DBROOT;
-		ASSERT(tip->i_d.di_version < 3 ||
+		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
 		       (*target_log_flags & XFS_ILOG_DOWNER));
 		break;
 	}
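Both shift paths now share one shape: allocate a single transaction, take the ILOCK once for the whole operation, loop the extent-shift helper while rolling the transaction, then commit and unlock. Reduced to a skeleton — this is the collapse-side control flow from the hunk above, condensed rather than new code:

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);		/* held across every roll */
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;
		error = xfs_defer_finish(&tp);	/* rolls tp; ILOCK stays held */
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);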
@@ -727,8 +727,9 @@ xfs_buf_get_map(
 	if (!bp->b_addr) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
-			xfs_warn(target->bt_mount,
-				"%s: failed to map pages\n", __func__);
+			xfs_warn_ratelimited(target->bt_mount,
+				"%s: failed to map %u pages", __func__,
+				bp->b_page_count);
 			xfs_buf_relse(bp);
 			return error;
 		}

@@ -1238,7 +1239,7 @@ xfs_buf_ioerror_alert(
 	struct xfs_buf		*bp,
 	xfs_failaddr_t		func)
 {
-	xfs_alert(bp->b_mount,
+	xfs_alert_ratelimited(bp->b_mount,
 "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
 			func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
 			-bp->b_error);

@@ -1572,6 +1573,28 @@ xfs_buf_zero(
 	}
 }
 
+/*
+ * Log a message about and stale a buffer that a caller has decided is corrupt.
+ *
+ * This function should be called for the kinds of metadata corruption that
+ * cannot be detect from a verifier, such as incorrect inter-block relationship
+ * data.  Do /not/ call this function from a verifier function.
+ *
+ * The buffer must be XBF_DONE prior to the call.  Afterwards, the buffer will
+ * be marked stale, but b_error will not be set.  The caller is responsible for
+ * releasing the buffer or fixing it.
+ */
+void
+__xfs_buf_mark_corrupt(
+	struct xfs_buf		*bp,
+	xfs_failaddr_t		fa)
+{
+	ASSERT(bp->b_flags & XBF_DONE);
+
+	xfs_buf_corruption_error(bp, fa);
+	xfs_buf_stale(bp);
+}
+
 /*
  * Handling of buffer targets (buftargs).
 */
@@ -272,6 +272,8 @@ static inline int xfs_buf_submit(struct xfs_buf *bp)
 }

 void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
+void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
+#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

 /* Buffer Utility Routines */
 extern void *xfs_buf_offset(struct xfs_buf *, size_t);
@@ -345,7 +345,7 @@ xfs_buf_item_format(
 	 * occurs during recovery.
 	 */
 	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
-		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
+		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
 		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
 		      xfs_log_item_in_current_chkpt(lip)))
 			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
@@ -147,7 +147,7 @@ xfs_dir2_block_getdents(
 	xfs_off_t		cook;
 	struct xfs_da_geometry	*geo = args->geo;
 	int			lock_mode;
-	unsigned int		offset;
+	unsigned int		offset, next_offset;
 	unsigned int		end;

 	/*
@@ -173,9 +173,10 @@ xfs_dir2_block_getdents(
 	 * Loop over the data portion of the block.
 	 * Each object is a real entry (dep) or an unused one (dup).
 	 */
-	offset = geo->data_entry_offset;
 	end = xfs_dir3_data_end_offset(geo, bp->b_addr);
-	while (offset < end) {
+	for (offset = geo->data_entry_offset;
+	     offset < end;
+	     offset = next_offset) {
 		struct xfs_dir2_data_unused	*dup = bp->b_addr + offset;
 		struct xfs_dir2_data_entry	*dep = bp->b_addr + offset;
 		uint8_t filetype;
@@ -184,14 +185,15 @@ xfs_dir2_block_getdents(
 		 * Unused, skip it.
 		 */
 		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
-			offset += be16_to_cpu(dup->length);
+			next_offset = offset + be16_to_cpu(dup->length);
 			continue;
 		}

 		/*
 		 * Bump pointer for the next iteration.
 		 */
-		offset += xfs_dir2_data_entsize(dp->i_mount, dep->namelen);
+		next_offset = offset +
+			xfs_dir2_data_entsize(dp->i_mount, dep->namelen);

 		/*
 		 * The entry is before the desired starting point, skip it.
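The getdents fix above is entirely about when the cursor advances: the old loop bumped offset partway through the body, so the rest of the iteration saw the next entry's offset instead of the current one. Computing next_offset up front keeps the current entry's offset valid for the whole body, and makes every continue and early exit safe. A minimal userspace sketch of the same pattern over a toy record format (the record layout here is invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk variable-length records: a 2-byte total length, then payload. */
static void walk(const unsigned char *buf, unsigned int end)
{
	unsigned int offset, next_offset;

	for (offset = 0; offset + 2 <= end; offset = next_offset) {
		uint16_t len;

		memcpy(&len, buf + offset, sizeof(len));
		/* compute the next offset before consuming this record */
		next_offset = offset + len;
		if (len < 2 || next_offset > end)
			break;		/* corrupt record, stop */
		printf("record at %u, len %u\n", offset, len);
	}
}

int main(void)
{
	unsigned char buf[32];
	uint16_t len = 8;

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &len, 2);		/* one record at offset 0 */
	memcpy(buf + 8, &len, 2);	/* and one at offset 8 */
	walk(buf, 16);
	return 0;
}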
@@ -31,6 +31,7 @@ xfs_trim_extents(
 	struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
 	struct xfs_btree_cur	*cur;
 	struct xfs_buf		*agbp;
+	struct xfs_agf		*agf;
 	struct xfs_perag	*pag;
 	int			error;
 	int			i;
@@ -47,14 +48,14 @@ xfs_trim_extents(
 	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
 	if (error)
 		goto out_put_perag;
+	agf = agbp->b_addr;

 	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);

 	/*
 	 * Look up the longest btree in the AGF and start with it.
 	 */
-	error = xfs_alloc_lookup_ge(cur, 0,
-			be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
+	error = xfs_alloc_lookup_ge(cur, 0, be32_to_cpu(agf->agf_longest), &i);
 	if (error)
 		goto out_del_cursor;

@@ -75,7 +76,7 @@ xfs_trim_extents(
 		error = -EFSCORRUPTED;
 		goto out_del_cursor;
 	}
-	ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
+	ASSERT(flen <= be32_to_cpu(agf->agf_longest));

 	/*
 	 * use daddr format for all range/len calculations as that is
@@ -829,9 +829,9 @@ xfs_qm_id_for_quotatype(
 {
 	switch (type) {
 	case XFS_DQ_USER:
-		return ip->i_d.di_uid;
+		return i_uid_read(VFS_I(ip));
 	case XFS_DQ_GROUP:
-		return ip->i_d.di_gid;
+		return i_gid_read(VFS_I(ip));
 	case XFS_DQ_PROJ:
 		return ip->i_d.di_projid;
 	}
@@ -307,35 +307,61 @@ xfs_qm_qoffend_logitem_committed(
 {
 	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
 	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
-	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

-	/*
-	 * Delete the qoff-start logitem from the AIL.
-	 * xfs_trans_ail_delete() drops the AIL lock.
-	 */
-	spin_lock(&ailp->ail_lock);
-	xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
+	xfs_qm_qoff_logitem_relse(qfs);

-	kmem_free(qfs->qql_item.li_lv_shadow);
 	kmem_free(lip->li_lv_shadow);
-	kmem_free(qfs);
 	kmem_free(qfe);
 	return (xfs_lsn_t)-1;
 }

+STATIC void
+xfs_qm_qoff_logitem_release(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_qoff_logitem	*qoff = QOFF_ITEM(lip);
+
+	if (test_bit(XFS_LI_ABORTED, &lip->li_flags)) {
+		if (qoff->qql_start_lip)
+			xfs_qm_qoff_logitem_relse(qoff->qql_start_lip);
+		xfs_qm_qoff_logitem_relse(qoff);
+	}
+}
+
 static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
 	.iop_size	= xfs_qm_qoff_logitem_size,
 	.iop_format	= xfs_qm_qoff_logitem_format,
 	.iop_committed	= xfs_qm_qoffend_logitem_committed,
 	.iop_push	= xfs_qm_qoff_logitem_push,
+	.iop_release	= xfs_qm_qoff_logitem_release,
 };

 static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
 	.iop_size	= xfs_qm_qoff_logitem_size,
 	.iop_format	= xfs_qm_qoff_logitem_format,
 	.iop_push	= xfs_qm_qoff_logitem_push,
+	.iop_release	= xfs_qm_qoff_logitem_release,
 };

+/*
+ * Delete the quotaoff intent from the AIL and free it.  On success, this
+ * should only be called for the start item.  It can be used either on
+ * shutdown or abort.
+ */
+void
+xfs_qm_qoff_logitem_relse(
+	struct xfs_qoff_logitem	*qoff)
+{
+	struct xfs_log_item	*lip = &qoff->qql_item;
+
+	ASSERT(test_bit(XFS_LI_IN_AIL, &lip->li_flags) ||
+	       test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+	       XFS_FORCED_SHUTDOWN(lip->li_mountp));
+	xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+	kmem_free(lip->li_lv_shadow);
+	kmem_free(qoff);
+}
+
 /*
  * Allocate and initialize a quotaoff item of the correct quota type(s).
  */
@@ -28,6 +28,7 @@ void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
 struct xfs_qoff_logitem	*xfs_qm_qoff_logitem_init(struct xfs_mount *mp,
 		struct xfs_qoff_logitem *start,
 		uint flags);
+void xfs_qm_qoff_logitem_relse(struct xfs_qoff_logitem *);
 struct xfs_qoff_logitem	*xfs_trans_get_qoff_item(struct xfs_trans *tp,
 		struct xfs_qoff_logitem *startqoff,
 		uint flags);
@@ -345,16 +345,19 @@ xfs_corruption_error(
  * Complain about the kinds of metadata corruption that we can't detect from a
  * verifier, such as incorrect inter-block relationship data.  Does not set
  * bp->b_error.
+ *
+ * Call xfs_buf_mark_corrupt, not this function.
  */
 void
 xfs_buf_corruption_error(
-	struct xfs_buf		*bp)
+	struct xfs_buf		*bp,
+	xfs_failaddr_t		fa)
 {
 	struct xfs_mount	*mp = bp->b_mount;

 	xfs_alert_tag(mp, XFS_PTAG_VERIFIER_ERROR,
 		  "Metadata corruption detected at %pS, %s block 0x%llx",
-		  __return_address, bp->b_ops->name, bp->b_bn);
+		  fa, bp->b_ops->name, bp->b_bn);

 	xfs_alert(mp, "Unmount and run xfs_repair");
@@ -15,7 +15,7 @@ extern void xfs_corruption_error(const char *tag, int level,
 			struct xfs_mount *mp, const void *buf, size_t bufsize,
 			const char *filename, int linenum,
 			xfs_failaddr_t failaddr);
-void xfs_buf_corruption_error(struct xfs_buf *bp);
+void xfs_buf_corruption_error(struct xfs_buf *bp, xfs_failaddr_t fa);
 extern void xfs_buf_verifier_error(struct xfs_buf *bp, int error,
 			const char *name, const void *buf, size_t bufsz,
 			xfs_failaddr_t failaddr);
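Taken together, these hunks give callers outside the verifiers a one-line convention for reporting inter-block corruption: xfs_buf_mark_corrupt() logs the detection site and stales the buffer, and the caller then returns -EFSCORRUPTED itself (the xfs_iunlink hunks below are converted to exactly this). A userspace sketch of the convention follows; the label-address trick standing in for the kernel's __this_address is a GCC/Clang extension and an assumption of this sketch, not the kernel's implementation:

#include <stdio.h>

typedef const void *failaddr_t;

/* Rough stand-in for the kernel's __this_address (GCC/Clang only). */
#define THIS_ADDRESS ({ __label__ __here; __here: (failaddr_t)&&__here; })

struct buf {
	const char	*name;
	int		stale;
};

static void __buf_mark_corrupt(struct buf *bp, failaddr_t fa)
{
	/* log once, with the address of the detection site */
	fprintf(stderr, "metadata corruption detected at %p in %s buffer\n",
		fa, bp->name);
	bp->stale = 1;	/* never write this buffer back */
}

#define buf_mark_corrupt(bp)	__buf_mark_corrupt((bp), THIS_ADDRESS)

static int check_bucket(struct buf *agibp, unsigned int old, unsigned int new)
{
	if (old == new) {		/* cycle in the unlinked list */
		buf_mark_corrupt(agibp);
		return -117;		/* -EFSCORRUPTED */
	}
	return 0;
}

int main(void)
{
	struct buf agibp = { .name = "agi" };

	return check_bucket(&agibp, 42, 42) ? 1 : 0;
}

The macro indirection is the point of the design: every caller reports its own code address, so a single log line identifies which of many corruption checks fired.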
@@ -344,7 +344,7 @@ xfs_getfsmap_datadev_helper(
 	xfs_fsblock_t			fsb;
 	xfs_daddr_t			rec_daddr;

-	fsb = XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, rec->rm_startblock);
+	fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.agno, rec->rm_startblock);
 	rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);

 	return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
@@ -362,7 +362,7 @@ xfs_getfsmap_datadev_bnobt_helper(
 	struct xfs_rmap_irec		irec;
 	xfs_daddr_t			rec_daddr;

-	rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_private.a.agno,
+	rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.agno,
 			rec->ar_startblock);

 	irec.rm_startblock = rec->ar_startblock;
@@ -896,6 +896,14 @@ xfs_getfsmap(
 	info.format_arg = arg;
 	info.head = head;

+	/*
+	 * If fsmap runs concurrently with a scrub, the freeze can be delayed
+	 * indefinitely as we walk the rmapbt and iterate over metadata
+	 * buffers.  Freeze quiesces the log (which waits for the buffer LRU to
+	 * be emptied) and that won't happen while we're reading buffers.
+	 */
+	sb_start_write(mp->m_super);
+
 	/* For each device we support... */
 	for (i = 0; i < XFS_GETFSMAP_DEVS; i++) {
 		/* Is this device within the range the user asked for? */
@@ -935,6 +943,7 @@ xfs_getfsmap(

 	if (tp)
 		xfs_trans_cancel(tp);
+	sb_end_write(mp->m_super);
 	head->fmh_oflags = FMH_OF_DEV_T;
 	return error;
 }
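sb_start_write()/sb_end_write() behave like a shared lock taken against freeze, so wrapping the whole GETFSMAP walk in them means a freeze cannot cut the walk in half, and a new walk cannot start while a freeze is in progress. The same bracket, modeled with a userspace rwlock (illustrative only; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Walkers take the lock shared; freeze takes it exclusive. */
static pthread_rwlock_t freeze_lock = PTHREAD_RWLOCK_INITIALIZER;

static void sb_start_write_model(void) { pthread_rwlock_rdlock(&freeze_lock); }
static void sb_end_write_model(void)   { pthread_rwlock_unlock(&freeze_lock); }

static void getfsmap_model(void)
{
	sb_start_write_model();
	/* long-running metadata walk goes here; freeze must wait for it */
	puts("walking reverse mappings...");
	sb_end_write_model();
}

static void freeze_model(void)
{
	pthread_rwlock_wrlock(&freeze_lock);	/* waits for all walkers */
	puts("filesystem frozen");
	pthread_rwlock_unlock(&freeze_lock);
}

int main(void)
{
	getfsmap_model();
	freeze_model();
	return 0;
}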
@@ -289,6 +289,8 @@ xfs_reinit_inode(
 	uint64_t	version = inode_peek_iversion(inode);
 	umode_t		mode = inode->i_mode;
 	dev_t		dev = inode->i_rdev;
+	kuid_t		uid = inode->i_uid;
+	kgid_t		gid = inode->i_gid;

 	error = inode_init_always(mp->m_super, inode);

@@ -297,6 +299,8 @@ xfs_reinit_inode(
 	inode_set_iversion_queried(inode, version);
 	inode->i_mode = mode;
 	inode->i_rdev = dev;
+	inode->i_uid = uid;
+	inode->i_gid = gid;
 	return error;
 }
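inode_init_always() reinitializes the entire VFS inode, so xfs_reinit_inode() must save and restore by hand every field that has to survive inode recycling; with di_uid/di_gid gone from the incore icdinode, i_uid and i_gid join mode, rdev and the i_version on that list. The shape of the pattern, reduced to plain C with invented field names:

#include <stdio.h>
#include <string.h>

struct toy_inode {
	unsigned int	mode;
	unsigned int	uid;
	unsigned int	gid;
	/* ...many other fields, all wiped on reinit... */
};

/* Stand-in for inode_init_always(): clears the whole inode. */
static void inode_init_always_model(struct toy_inode *inode)
{
	memset(inode, 0, sizeof(*inode));
}

static void reinit_inode_model(struct toy_inode *inode)
{
	/* save the fields that must survive reinitialization... */
	unsigned int mode = inode->mode;
	unsigned int uid = inode->uid;
	unsigned int gid = inode->gid;

	inode_init_always_model(inode);

	/* ...and put them back afterwards */
	inode->mode = mode;
	inode->uid = uid;
	inode->gid = gid;
}

int main(void)
{
	struct toy_inode inode = { .mode = 0644, .uid = 1000, .gid = 1000 };

	reinit_inode_model(&inode);
	printf("mode=%o uid=%u gid=%u\n", inode.mode, inode.uid, inode.gid);
	return 0;
}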
@@ -801,26 +801,18 @@ xfs_ialloc(
 		return error;
 	ASSERT(ip != NULL);
 	inode = VFS_I(ip);

-	/*
-	 * We always convert v1 inodes to v2 now - we only support filesystems
-	 * with >= v2 inode capability, so there is no reason for ever leaving
-	 * an inode in v1 format.
-	 */
-	if (ip->i_d.di_version == 1)
-		ip->i_d.di_version = 2;
-
 	inode->i_mode = mode;
 	set_nlink(inode, nlink);
-	ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
-	ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
+	inode->i_uid = current_fsuid();
 	inode->i_rdev = rdev;
 	ip->i_d.di_projid = prid;

 	if (pip && XFS_INHERIT_GID(pip)) {
-		ip->i_d.di_gid = pip->i_d.di_gid;
+		inode->i_gid = VFS_I(pip)->i_gid;
 		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
 			inode->i_mode |= S_ISGID;
+	} else {
+		inode->i_gid = current_fsgid();
 	}

 	/*
@@ -828,9 +820,8 @@ xfs_ialloc(
 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
 	 * (and only if the irix_sgid_inherit compatibility variable is set).
 	 */
-	if ((irix_sgid_inherit) &&
-	    (inode->i_mode & S_ISGID) &&
-	    (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
+	if (irix_sgid_inherit &&
+	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
 		inode->i_mode &= ~S_ISGID;

 	ip->i_d.di_size = 0;
@@ -847,14 +838,13 @@ xfs_ialloc(
 	ip->i_d.di_dmstate = 0;
 	ip->i_d.di_flags = 0;

-	if (ip->i_d.di_version == 3) {
+	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 		inode_set_iversion(inode, 1);
 		ip->i_d.di_flags2 = 0;
 		ip->i_d.di_cowextsize = 0;
 		ip->i_d.di_crtime = tv;
 	}

 	flags = XFS_ILOG_CORE;
 	switch (mode & S_IFMT) {
 	case S_IFIFO:
@@ -907,20 +897,13 @@ xfs_ialloc(

 			ip->i_d.di_flags |= di_flags;
 		}
-		if (pip &&
-		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
-		    pip->i_d.di_version == 3 &&
-		    ip->i_d.di_version == 3) {
-			uint64_t	di_flags2 = 0;
-
+		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) {
 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
-				di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
 			}
 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
-				di_flags2 |= XFS_DIFLAG2_DAX;
-
-			ip->i_d.di_flags2 |= di_flags2;
+				ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
 		}
 		/* FALLTHROUGH */
 	case S_IFLNK:
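With the ids now living only in the VFS inode, the group-inheritance decision in xfs_ialloc() reads straight from the parent's inode: a setgid parent directory donates its gid (and S_ISGID propagates to new subdirectories), otherwise the creator's fsgid is used. A small standalone rendition of that decision, simplified: it ignores the grpid mount option that the real XFS_INHERIT_GID() also honors, and all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define S_ISGID_BIT	02000

struct cred { unsigned int fsgid; };

static unsigned int new_inode_gid(bool parent_setgid, unsigned int parent_gid,
				  const struct cred *cred, bool is_dir,
				  unsigned int *mode)
{
	if (parent_setgid) {
		/* inherit the group, and keep setgid on new directories */
		if (is_dir)
			*mode |= S_ISGID_BIT;
		return parent_gid;
	}
	return cred->fsgid;
}

int main(void)
{
	struct cred cred = { .fsgid = 1000 };
	unsigned int mode = 0755;
	unsigned int gid = new_inode_gid(true, 50, &cred, true, &mode);

	printf("gid=%u setgid=%s\n", gid, (mode & S_ISGID_BIT) ? "yes" : "no");
	return 0;
}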
@@ -1122,7 +1105,6 @@ xfs_bumplink(
 {
 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

-	ASSERT(ip->i_d.di_version > 1);
 	inc_nlink(VFS_I(ip));
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 }
@@ -1158,8 +1140,7 @@ xfs_create(
 	/*
 	 * Make sure that we have allocated dquot(s) on disk.
 	 */
-	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
-					xfs_kgid_to_gid(current_fsgid()), prid,
+	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
 					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 					&udqp, &gdqp, &pdqp);
 	if (error)
@@ -1309,8 +1290,7 @@ xfs_create_tmpfile(
 	/*
 	 * Make sure that we have allocated dquot(s) on disk.
 	 */
-	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
-					xfs_kgid_to_gid(current_fsgid()), prid,
+	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
 					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 					&udqp, &gdqp, &pdqp);
 	if (error)
@@ -2119,7 +2099,7 @@ xfs_iunlink_update_bucket(
 	unsigned int		bucket_index,
 	xfs_agino_t		new_agino)
 {
-	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agibp);
+	struct xfs_agi		*agi = agibp->b_addr;
 	xfs_agino_t		old_value;
 	int			offset;

@@ -2135,7 +2115,7 @@ xfs_iunlink_update_bucket(
 	 * head of the list.
 	 */
 	if (old_value == new_agino) {
-		xfs_buf_corruption_error(agibp);
+		xfs_buf_mark_corrupt(agibp);
 		return -EFSCORRUPTED;
 	}

@@ -2259,7 +2239,7 @@ xfs_iunlink(
 	error = xfs_read_agi(mp, tp, agno, &agibp);
 	if (error)
 		return error;
-	agi = XFS_BUF_TO_AGI(agibp);
+	agi = agibp->b_addr;

 	/*
 	 * Get the index into the agi hash table for the list this inode will
@@ -2269,7 +2249,7 @@ xfs_iunlink(
 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
 	if (next_agino == agino ||
 	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
-		xfs_buf_corruption_error(agibp);
+		xfs_buf_mark_corrupt(agibp);
 		return -EFSCORRUPTED;
 	}

@@ -2443,7 +2423,7 @@ xfs_iunlink_remove(
 	error = xfs_read_agi(mp, tp, agno, &agibp);
 	if (error)
 		return error;
-	agi = XFS_BUF_TO_AGI(agibp);
+	agi = agibp->b_addr;

 	/*
 	 * Get the index into the agi hash table for the list this inode will
@@ -3807,7 +3787,6 @@ xfs_iflush_int(
 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
 	ASSERT(iip != NULL && iip->ili_fields != 0);
-	ASSERT(ip->i_d.di_version > 1);

 	/* set *dip = inode's place in the buffer */
 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -3868,7 +3847,7 @@ xfs_iflush_int(
 	 * backwards compatibility with old kernels that predate logging all
 	 * inode changes.
 	 */
-	if (ip->i_d.di_version < 3)
+	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
 		ip->i_d.di_flushiter++;

 	/* Check the inline fork data before we write out. */
@@ -125,7 +125,7 @@ xfs_inode_item_size(

 	*nvecs += 2;
 	*nbytes += sizeof(struct xfs_inode_log_format) +
-		   xfs_log_dinode_size(ip->i_d.di_version);
+		   xfs_log_dinode_size(ip->i_mount);

 	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
 	if (XFS_IFORK_Q(ip))
@@ -305,11 +305,9 @@ xfs_inode_to_log_dinode(
 	struct inode		*inode = VFS_I(ip);

 	to->di_magic = XFS_DINODE_MAGIC;
-
-	to->di_version = from->di_version;
 	to->di_format = from->di_format;
-	to->di_uid = from->di_uid;
-	to->di_gid = from->di_gid;
+	to->di_uid = i_uid_read(inode);
+	to->di_gid = i_gid_read(inode);
 	to->di_projid_lo = from->di_projid & 0xffff;
 	to->di_projid_hi = from->di_projid >> 16;

@@ -339,7 +337,8 @@ xfs_inode_to_log_dinode(
 	/* log a dummy value to ensure log structure is fully initialised */
 	to->di_next_unlinked = NULLAGINO;

-	if (from->di_version == 3) {
+	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+		to->di_version = 3;
 		to->di_changecount = inode_peek_iversion(inode);
 		to->di_crtime.t_sec = from->di_crtime.tv_sec;
 		to->di_crtime.t_nsec = from->di_crtime.tv_nsec;
@@ -351,6 +350,7 @@ xfs_inode_to_log_dinode(
 		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
 		to->di_flushiter = 0;
 	} else {
+		to->di_version = 2;
 		to->di_flushiter = from->di_flushiter;
 	}
 }
@@ -370,7 +370,7 @@ xfs_inode_item_format_core(

 	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
 	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
-	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_d.di_version));
+	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
 }

 /*
@@ -395,8 +395,6 @@ xfs_inode_item_format(
 	struct xfs_log_iovec	*vecp = NULL;
 	struct xfs_inode_log_format *ilf;

-	ASSERT(ip->i_d.di_version > 1);
-
 	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
 	ilf->ilf_type = XFS_LI_INODE;
 	ilf->ilf_ino = ip->i_ino;
@@ -35,6 +35,8 @@
 #include "xfs_health.h"
 #include "xfs_reflink.h"
 #include "xfs_ioctl.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"

 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -292,62 +294,173 @@ xfs_readlink_by_handle(
 	return error;
 }

-STATIC int
-xfs_attrlist_by_handle(
-	struct file		*parfilp,
-	void			__user *arg)
+/*
+ * Format an attribute and copy it out to the user's buffer.
+ * Take care to check values and protect against them changing later,
+ * we may be reading them directly out of a user buffer.
+ */
+static void
+xfs_ioc_attr_put_listent(
+	struct xfs_attr_list_context *context,
+	int			flags,
+	unsigned char		*name,
+	int			namelen,
+	int			valuelen)
 {
-	int			error = -ENOMEM;
-	attrlist_cursor_kern_t	*cursor;
-	struct xfs_fsop_attrlist_handlereq __user *p = arg;
-	xfs_fsop_attrlist_handlereq_t al_hreq;
-	struct dentry		*dentry;
-	char			*kbuf;
+	struct xfs_attrlist	*alist = context->buffer;
+	struct xfs_attrlist_ent	*aep;
+	int			arraytop;

-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
-		return -EFAULT;
-	if (al_hreq.buflen < sizeof(struct attrlist) ||
-	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
-		return -EINVAL;
+	ASSERT(!context->seen_enough);
+	ASSERT(context->count >= 0);
+	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
+	ASSERT(context->firstu >= sizeof(*alist));
+	ASSERT(context->firstu <= context->bufsize);
+
+	/*
+	 * Only list entries in the right namespace.
+	 */
+	if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
+		return;
+
+	arraytop = sizeof(*alist) +
+			context->count * sizeof(alist->al_offset[0]);
+
+	/* decrement by the actual bytes used by the attr */
+	context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
+			namelen + 1, sizeof(uint32_t));
+	if (context->firstu < arraytop) {
+		trace_xfs_attr_list_full(context);
+		alist->al_more = 1;
+		context->seen_enough = 1;
+		return;
+	}
+
+	aep = context->buffer + context->firstu;
+	aep->a_valuelen = valuelen;
+	memcpy(aep->a_name, name, namelen);
+	aep->a_name[namelen] = 0;
+	alist->al_offset[context->count++] = context->firstu;
+	alist->al_count = context->count;
+	trace_xfs_attr_list_add(context);
+}
+
+static unsigned int
+xfs_attr_filter(
+	u32			ioc_flags)
+{
+	if (ioc_flags & XFS_IOC_ATTR_ROOT)
+		return XFS_ATTR_ROOT;
+	if (ioc_flags & XFS_IOC_ATTR_SECURE)
+		return XFS_ATTR_SECURE;
+	return 0;
+}
+
+static unsigned int
+xfs_attr_flags(
+	u32			ioc_flags)
+{
+	if (ioc_flags & XFS_IOC_ATTR_CREATE)
+		return XATTR_CREATE;
+	if (ioc_flags & XFS_IOC_ATTR_REPLACE)
+		return XATTR_REPLACE;
+	return 0;
+}
+
+int
+xfs_ioc_attr_list(
+	struct xfs_inode	*dp,
+	void __user		*ubuf,
+	int			bufsize,
+	int			flags,
+	struct xfs_attrlist_cursor __user *ucursor)
+{
+	struct xfs_attr_list_context context = { };
+	struct xfs_attrlist	*alist;
+	void			*buffer;
+	int			error;
+
+	if (bufsize < sizeof(struct xfs_attrlist) ||
+	    bufsize > XFS_XATTR_LIST_MAX)
+		return -EINVAL;

 	/*
 	 * Reject flags, only allow namespaces.
 	 */
-	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
+	if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
+		return -EINVAL;
+	if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
 		return -EINVAL;

+	/*
+	 * Validate the cursor.
+	 */
+	if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
+		return -EFAULT;
+	if (context.cursor.pad1 || context.cursor.pad2)
+		return -EINVAL;
+	if (!context.cursor.initted &&
+	    (context.cursor.hashval || context.cursor.blkno ||
+	     context.cursor.offset))
+		return -EINVAL;
+
+	buffer = kmem_zalloc_large(bufsize, 0);
+	if (!buffer)
+		return -ENOMEM;
+
+	/*
+	 * Initialize the output buffer.
+	 */
+	context.dp = dp;
+	context.resynch = 1;
+	context.attr_filter = xfs_attr_filter(flags);
+	context.buffer = buffer;
+	context.bufsize = round_down(bufsize, sizeof(uint32_t));
+	context.firstu = context.bufsize;
+	context.put_listent = xfs_ioc_attr_put_listent;
+
+	alist = context.buffer;
+	alist->al_count = 0;
+	alist->al_more = 0;
+	alist->al_offset[0] = context.bufsize;
+
+	error = xfs_attr_list(&context);
+	if (error)
+		goto out_free;
+
+	if (copy_to_user(ubuf, buffer, bufsize) ||
+	    copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
+		error = -EFAULT;
+out_free:
+	kmem_free(buffer);
+	return error;
+}
+
+STATIC int
+xfs_attrlist_by_handle(
+	struct file		*parfilp,
+	struct xfs_fsop_attrlist_handlereq __user *p)
+{
+	struct xfs_fsop_attrlist_handlereq al_hreq;
+	struct dentry		*dentry;
+	int			error = -ENOMEM;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
+		return -EFAULT;
+
 	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);

-	kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
-	if (!kbuf)
-		goto out_dput;
-
-	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
-	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
-					al_hreq.flags, cursor);
-	if (error)
-		goto out_kfree;
-
-	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
-		error = -EFAULT;
-		goto out_kfree;
-	}
-
-	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
-		error = -EFAULT;
-
-out_kfree:
-	kmem_free(kbuf);
-out_dput:
+	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
+				  al_hreq.buflen, al_hreq.flags, &p->pos);
 	dput(dentry);
 	return error;
 }

-int
+static int
 xfs_attrmulti_attr_get(
 	struct inode		*inode,
 	unsigned char		*name,
@@ -355,31 +468,33 @@ xfs_attrmulti_attr_get(
 	uint32_t		*len,
 	uint32_t		flags)
 {
-	unsigned char		*kbuf;
-	int			error = -EFAULT;
-	size_t			namelen;
+	struct xfs_da_args	args = {
+		.dp		= XFS_I(inode),
+		.attr_filter	= xfs_attr_filter(flags),
+		.attr_flags	= xfs_attr_flags(flags),
+		.name		= name,
+		.namelen	= strlen(name),
+		.valuelen	= *len,
+	};
+	int			error;

 	if (*len > XFS_XATTR_SIZE_MAX)
 		return -EINVAL;
-	kbuf = kmem_zalloc_large(*len, 0);
-	if (!kbuf)
-		return -ENOMEM;

-	namelen = strlen(name);
-	error = xfs_attr_get(XFS_I(inode), name, namelen, &kbuf, (int *)len,
-			     flags);
+	error = xfs_attr_get(&args);
 	if (error)
 		goto out_kfree;

-	if (copy_to_user(ubuf, kbuf, *len))
+	*len = args.valuelen;
+	if (copy_to_user(ubuf, args.value, args.valuelen))
 		error = -EFAULT;

 out_kfree:
-	kmem_free(kbuf);
+	kmem_free(args.value);
 	return error;
 }

-int
+static int
 xfs_attrmulti_attr_set(
 	struct inode		*inode,
 	unsigned char		*name,
@@ -387,42 +502,75 @@ xfs_attrmulti_attr_set(
 	uint32_t		len,
 	uint32_t		flags)
 {
-	unsigned char		*kbuf;
+	struct xfs_da_args	args = {
+		.dp		= XFS_I(inode),
+		.attr_filter	= xfs_attr_filter(flags),
+		.attr_flags	= xfs_attr_flags(flags),
+		.name		= name,
+		.namelen	= strlen(name),
+	};
 	int			error;
-	size_t			namelen;

 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		return -EPERM;
-	if (len > XFS_XATTR_SIZE_MAX)
-		return -EINVAL;

-	kbuf = memdup_user(ubuf, len);
-	if (IS_ERR(kbuf))
-		return PTR_ERR(kbuf);
+	if (ubuf) {
+		if (len > XFS_XATTR_SIZE_MAX)
+			return -EINVAL;
+		args.value = memdup_user(ubuf, len);
+		if (IS_ERR(args.value))
+			return PTR_ERR(args.value);
+		args.valuelen = len;
+	}

-	namelen = strlen(name);
-	error = xfs_attr_set(XFS_I(inode), name, namelen, kbuf, len, flags);
-	if (!error)
-		xfs_forget_acl(inode, name, flags);
-	kfree(kbuf);
+	error = xfs_attr_set(&args);
+	if (!error && (flags & XFS_IOC_ATTR_ROOT))
+		xfs_forget_acl(inode, name);
+	kfree(args.value);
 	return error;
 }

 int
-xfs_attrmulti_attr_remove(
+xfs_ioc_attrmulti_one(
+	struct file		*parfilp,
 	struct inode		*inode,
-	unsigned char		*name,
+	uint32_t		opcode,
+	void __user		*uname,
+	void __user		*value,
+	uint32_t		*len,
 	uint32_t		flags)
 {
+	unsigned char		*name;
 	int			error;
-	size_t			namelen;

-	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
-		return -EPERM;
-	namelen = strlen(name);
-	error = xfs_attr_remove(XFS_I(inode), name, namelen, flags);
-	if (!error)
-		xfs_forget_acl(inode, name, flags);
+	if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
+		return -EINVAL;
+
+	name = strndup_user(uname, MAXNAMELEN);
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	switch (opcode) {
+	case ATTR_OP_GET:
+		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
+		break;
+	case ATTR_OP_REMOVE:
+		value = NULL;
+		*len = 0;
+		/* fall through */
+	case ATTR_OP_SET:
+		error = mnt_want_write_file(parfilp);
+		if (error)
+			break;
+		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
+		mnt_drop_write_file(parfilp);
+		break;
+	default:
+		error = -EINVAL;
+		break;
+	}
+
+	kfree(name);
 	return error;
 }
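Note the deliberate fall-through in xfs_ioc_attrmulti_one() above: REMOVE is just SET with a NULL value, which the reworked xfs_attr_set() interprets as removal. A miniature of that dispatch shape, with errno-style returns and all names invented for illustration:

#include <errno.h>
#include <stdio.h>

enum { OP_GET = 1, OP_SET = 2, OP_REMOVE = 3 };

/* A NULL value means "remove the attribute". */
static int attr_set_model(const char *name, const void *value, size_t len)
{
	if (!value)
		return printf("remove %s\n", name), 0;
	return printf("set %s (%zu bytes)\n", name, len), 0;
}

static int attrmulti_one(int opcode, const char *name,
			 const void *value, size_t len)
{
	int error;

	switch (opcode) {
	case OP_GET:
		error = printf("get %s\n", name) < 0 ? -EIO : 0;
		break;
	case OP_REMOVE:
		value = NULL;	/* turn remove into set-with-no-value */
		len = 0;
		/* fall through */
	case OP_SET:
		error = attr_set_model(name, value, len);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

int main(void)
{
	attrmulti_one(OP_SET, "user.a", "x", 1);
	attrmulti_one(OP_REMOVE, "user.a", "ignored", 7);
	return attrmulti_one(99, "user.a", NULL, 0) == -EINVAL ? 0 : 1;
}

Folding the three opcodes into one helper is what lets both the native and compat ioctl paths shrink to a single call each, as the hunks below show.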
@@ -436,7 +584,6 @@ xfs_attrmulti_by_handle(
 	xfs_fsop_attrmulti_handlereq_t am_hreq;
 	struct dentry		*dentry;
 	unsigned int		i, size;
-	unsigned char		*attr_name;

 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -462,63 +609,17 @@ xfs_attrmulti_by_handle(
 		goto out_dput;
 	}

-	error = -ENOMEM;
-	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
-	if (!attr_name)
-		goto out_kfree_ops;
-
 	error = 0;
 	for (i = 0; i < am_hreq.opcount; i++) {
-		if ((ops[i].am_flags & ATTR_ROOT) &&
-		    (ops[i].am_flags & ATTR_SECURE)) {
-			ops[i].am_error = -EINVAL;
-			continue;
-		}
-		ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
-
-		ops[i].am_error = strncpy_from_user((char *)attr_name,
-				ops[i].am_attrname, MAXNAMELEN);
-		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
-			error = -ERANGE;
-		if (ops[i].am_error < 0)
-			break;
-
-		switch (ops[i].am_opcode) {
-		case ATTR_OP_GET:
-			ops[i].am_error = xfs_attrmulti_attr_get(
-					d_inode(dentry), attr_name,
-					ops[i].am_attrvalue, &ops[i].am_length,
-					ops[i].am_flags);
-			break;
-		case ATTR_OP_SET:
-			ops[i].am_error = mnt_want_write_file(parfilp);
-			if (ops[i].am_error)
-				break;
-			ops[i].am_error = xfs_attrmulti_attr_set(
-					d_inode(dentry), attr_name,
-					ops[i].am_attrvalue, ops[i].am_length,
-					ops[i].am_flags);
-			mnt_drop_write_file(parfilp);
-			break;
-		case ATTR_OP_REMOVE:
-			ops[i].am_error = mnt_want_write_file(parfilp);
-			if (ops[i].am_error)
-				break;
-			ops[i].am_error = xfs_attrmulti_attr_remove(
-					d_inode(dentry), attr_name,
-					ops[i].am_flags);
-			mnt_drop_write_file(parfilp);
-			break;
-		default:
-			ops[i].am_error = -EINVAL;
-		}
+		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
+				d_inode(dentry), ops[i].am_opcode,
+				ops[i].am_attrname, ops[i].am_attrvalue,
+				&ops[i].am_length, ops[i].am_flags);
 	}

 	if (copy_to_user(am_hreq.ops, ops, size))
 		error = -EFAULT;

-	kfree(attr_name);
-out_kfree_ops:
 	kfree(ops);
 out_dput:
 	dput(dentry);
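The xfs_ioc_attr_put_listent() helper earlier in this section fills its output buffer from both ends: the header and the al_offset[] array grow upward while the names themselves are packed downward from firstu, and listing stops with al_more = 1 once the two regions would meet. A compact userspace model of that layout, with the struct and sizes simplified for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_list {
	uint32_t	count;
	uint32_t	more;
	uint32_t	offset[];	/* grows up from the header */
};

static int put_listent(void *buffer, uint32_t *firstu, const char *name)
{
	struct toy_list *list = buffer;
	uint32_t arraytop = sizeof(*list) +
			(list->count + 1) * sizeof(list->offset[0]);
	uint32_t need = (uint32_t)strlen(name) + 1;

	/* names are packed down from the end; stop before they collide */
	if (*firstu < arraytop + need) {
		list->more = 1;		/* caller must come back */
		return -1;
	}
	*firstu -= need;
	memcpy((char *)buffer + *firstu, name, need);
	list->offset[list->count++] = *firstu;
	return 0;
}

int main(void)
{
	_Alignas(uint32_t) char buffer[48] = { 0 };
	uint32_t firstu = sizeof(buffer);
	struct toy_list *list = (void *)buffer;
	const char *names[] = { "user.a", "user.b", "user.longer_name" };

	for (int i = 0; i < 3; i++)
		if (put_listent(buffer, &firstu, names[i]))
			break;
	printf("stored %u names, more=%u\n", list->count, list->more);
	return 0;
}

The two-ended layout means no second pass is needed to build the offset table, and "out of room" falls out naturally as the crossing of the two cursors.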
@@ -1162,7 +1263,7 @@ xfs_ioctl_setattr_xflags(

 	/* diflags2 only valid for v3 inodes. */
 	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
-	if (di_flags2 && ip->i_d.di_version < 3)
+	if (di_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
 		return -EINVAL;

 	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
@@ -1372,8 +1473,7 @@ xfs_ioctl_setattr_check_cowextsize(
 	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
 		return 0;

-	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) ||
-	    ip->i_d.di_version != 3)
+	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
 		return -EINVAL;

 	if (fa->fsx_cowextsize == 0)
@@ -1434,9 +1534,9 @@ xfs_ioctl_setattr(
 	 * because the i_*dquot fields will get updated anyway.
 	 */
 	if (XFS_IS_QUOTA_ON(mp)) {
-		code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
-					 ip->i_d.di_gid, fa->fsx_projid,
-					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
+		code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
+					 VFS_I(ip)->i_gid, fa->fsx_projid,
+					 XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
 		if (code)
 			return code;
 	}
@@ -1501,7 +1601,6 @@ xfs_ioctl_setattr(
 			olddquot = xfs_qm_vop_chown(tp, ip,
 						&ip->i_pdquot, pdqp);
 		}
-		ASSERT(ip->i_d.di_version > 1);
 		ip->i_d.di_projid = fa->fsx_projid;
 	}

@@ -1514,7 +1613,7 @@ xfs_ioctl_setattr(
 		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
 	else
 		ip->i_d.di_extsize = 0;
-	if (ip->i_d.di_version == 3 &&
+	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
 	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
 		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
 				mp->m_sb.sb_blocklog;
@@ -6,6 +6,11 @@
 #ifndef __XFS_IOCTL_H__
 #define __XFS_IOCTL_H__

+struct xfs_bstat;
+struct xfs_ibulk;
+struct xfs_inogrp;
+
 extern int
 xfs_ioc_space(
 	struct file		*filp,
@@ -30,27 +35,11 @@ xfs_readlink_by_handle(
 	struct file		*parfilp,
 	xfs_fsop_handlereq_t	*hreq);

-extern int
-xfs_attrmulti_attr_get(
-	struct inode		*inode,
-	unsigned char		*name,
-	unsigned char		__user *ubuf,
-	uint32_t		*len,
-	uint32_t		flags);
-
-extern int
-xfs_attrmulti_attr_set(
-	struct inode		*inode,
-	unsigned char		*name,
-	const unsigned char	__user *ubuf,
-	uint32_t		len,
-	uint32_t		flags);
-
-extern int
-xfs_attrmulti_attr_remove(
-	struct inode		*inode,
-	unsigned char		*name,
-	uint32_t		flags);
+int xfs_ioc_attrmulti_one(struct file *parfilp, struct inode *inode,
+		uint32_t opcode, void __user *uname, void __user *value,
+		uint32_t *len, uint32_t flags);
+int xfs_ioc_attr_list(struct xfs_inode *dp, void __user *ubuf, int bufsize,
+		int flags, struct xfs_attrlist_cursor __user *ucursor);

 extern struct dentry *
 xfs_handle_to_dentry(
@@ -70,10 +59,6 @@ xfs_file_compat_ioctl(
 	unsigned int		cmd,
 	unsigned long		arg);

-struct xfs_ibulk;
-struct xfs_bstat;
-struct xfs_inogrp;
-
 int xfs_fsbulkstat_one_fmt(struct xfs_ibulk *breq,
 			   const struct xfs_bulkstat *bstat);
 int xfs_fsinumbers_fmt(struct xfs_ibulk *breq, const struct xfs_inumbers *igrp);
@@ -352,56 +352,24 @@ xfs_compat_handlereq_to_dentry(
 STATIC int
 xfs_compat_attrlist_by_handle(
 	struct file		*parfilp,
-	void			__user *arg)
+	compat_xfs_fsop_attrlist_handlereq_t __user *p)
 {
-	int			error;
-	attrlist_cursor_kern_t	*cursor;
-	compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
 	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
 	struct dentry		*dentry;
-	char			*kbuf;
+	int			error;

 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	if (copy_from_user(&al_hreq, arg,
-			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
+	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
 		return -EFAULT;
-	if (al_hreq.buflen < sizeof(struct attrlist) ||
-	    al_hreq.buflen > XFS_XATTR_LIST_MAX)
-		return -EINVAL;
-
-	/*
-	 * Reject flags, only allow namespaces.
-	 */
-	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
-		return -EINVAL;

 	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);

-	error = -ENOMEM;
-	kbuf = kmem_zalloc_large(al_hreq.buflen, 0);
-	if (!kbuf)
-		goto out_dput;
-
-	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
-	error = xfs_attr_list(XFS_I(d_inode(dentry)), kbuf, al_hreq.buflen,
-					al_hreq.flags, cursor);
-	if (error)
-		goto out_kfree;
-
-	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
-		error = -EFAULT;
-		goto out_kfree;
-	}
-
-	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
-		error = -EFAULT;
-
-out_kfree:
-	kmem_free(kbuf);
-out_dput:
+	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)),
+			compat_ptr(al_hreq.buffer), al_hreq.buflen,
+			al_hreq.flags, &p->pos);
 	dput(dentry);
 	return error;
 }
@@ -416,7 +384,6 @@ xfs_compat_attrmulti_by_handle(
 	compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
 	struct dentry		*dentry;
 	unsigned int		i, size;
-	unsigned char		*attr_name;

 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -443,64 +410,18 @@ xfs_compat_attrmulti_by_handle(
 		goto out_dput;
 	}

-	error = -ENOMEM;
-	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
-	if (!attr_name)
-		goto out_kfree_ops;
-
 	error = 0;
 	for (i = 0; i < am_hreq.opcount; i++) {
-		if ((ops[i].am_flags & ATTR_ROOT) &&
-		    (ops[i].am_flags & ATTR_SECURE)) {
-			ops[i].am_error = -EINVAL;
-			continue;
-		}
-		ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
-
-		ops[i].am_error = strncpy_from_user((char *)attr_name,
-				compat_ptr(ops[i].am_attrname),
-				MAXNAMELEN);
-		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
-			error = -ERANGE;
-		if (ops[i].am_error < 0)
-			break;
-
-		switch (ops[i].am_opcode) {
-		case ATTR_OP_GET:
-			ops[i].am_error = xfs_attrmulti_attr_get(
-					d_inode(dentry), attr_name,
-					compat_ptr(ops[i].am_attrvalue),
-					&ops[i].am_length, ops[i].am_flags);
-			break;
-		case ATTR_OP_SET:
-			ops[i].am_error = mnt_want_write_file(parfilp);
-			if (ops[i].am_error)
-				break;
-			ops[i].am_error = xfs_attrmulti_attr_set(
-					d_inode(dentry), attr_name,
-					compat_ptr(ops[i].am_attrvalue),
-					ops[i].am_length, ops[i].am_flags);
-			mnt_drop_write_file(parfilp);
-			break;
-		case ATTR_OP_REMOVE:
-			ops[i].am_error = mnt_want_write_file(parfilp);
-			if (ops[i].am_error)
-				break;
-			ops[i].am_error = xfs_attrmulti_attr_remove(
-					d_inode(dentry), attr_name,
-					ops[i].am_flags);
-			mnt_drop_write_file(parfilp);
-			break;
-		default:
-			ops[i].am_error = -EINVAL;
-		}
+		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
+				d_inode(dentry), ops[i].am_opcode,
+				compat_ptr(ops[i].am_attrname),
+				compat_ptr(ops[i].am_attrvalue),
+				&ops[i].am_length, ops[i].am_flags);
 	}

 	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
 		error = -EFAULT;

-	kfree(attr_name);
-out_kfree_ops:
 	kfree(ops);
 out_dput:
 	dput(dentry);
@@ -22,7 +22,6 @@
 #include "xfs_iomap.h"
 #include "xfs_error.h"

-#include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/security.h>
 #include <linux/iversion.h>
@@ -50,10 +49,15 @@ xfs_initxattrs(
 	int			error = 0;

 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-		error = xfs_attr_set(ip, xattr->name,
-				     strlen(xattr->name),
-				     xattr->value, xattr->value_len,
-				     ATTR_SECURE);
+		struct xfs_da_args	args = {
+			.dp		= ip,
+			.attr_filter	= XFS_ATTR_SECURE,
+			.name		= xattr->name,
+			.namelen	= strlen(xattr->name),
+			.value		= xattr->value,
+			.valuelen	= xattr->value_len,
+		};
+		error = xfs_attr_set(&args);
 		if (error < 0)
 			break;
 	}
@@ -553,7 +557,7 @@ xfs_vn_getattr(
 	stat->blocks =
 		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);

-	if (ip->i_d.di_version == 3) {
+	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 		if (request_mask & STATX_BTIME) {
 			stat->result_mask |= STATX_BTIME;
 			stat->btime = ip->i_d.di_crtime;
@@ -692,9 +696,7 @@ xfs_setattr_nonsize(
 		 */
 		ASSERT(udqp == NULL);
 		ASSERT(gdqp == NULL);
-		error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
-					   xfs_kgid_to_gid(gid),
-					   ip->i_d.di_projid,
+		error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
 					   qflags, &udqp, &gdqp, NULL);
 		if (error)
 			return error;
@@ -763,7 +765,6 @@ xfs_setattr_nonsize(
 				olddquot1 = xfs_qm_vop_chown(tp, ip,
 							&ip->i_udquot, udqp);
 			}
-			ip->i_d.di_uid = xfs_kuid_to_uid(uid);
 			inode->i_uid = uid;
 		}
 		if (!gid_eq(igid, gid)) {
@@ -775,7 +776,6 @@ xfs_setattr_nonsize(
 				olddquot2 = xfs_qm_vop_chown(tp, ip,
 							&ip->i_gdquot, gdqp);
 			}
-			ip->i_d.di_gid = xfs_kgid_to_gid(gid);
 			inode->i_gid = gid;
 		}
 	}
@@ -1304,9 +1304,6 @@ xfs_setup_inode(
 	/* make the inode look hashed for the writeback code */
 	inode_fake_hash(inode);

-	inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
-	inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
-
 	i_size_write(inode, ip->i_d.di_size);
 	xfs_diflags_to_iflags(inode, ip);
@@ -86,8 +86,8 @@ xfs_bulkstat_one_int(
 	 */
 	buf->bs_projectid = ip->i_d.di_projid;
 	buf->bs_ino = ino;
-	buf->bs_uid = dic->di_uid;
-	buf->bs_gid = dic->di_gid;
+	buf->bs_uid = i_uid_read(inode);
+	buf->bs_gid = i_gid_read(inode);
 	buf->bs_size = dic->di_size;

 	buf->bs_nlink = inode->i_nlink;
@@ -110,7 +110,7 @@ xfs_bulkstat_one_int(
 	buf->bs_forkoff = XFS_IFORK_BOFF(ip);
 	buf->bs_version = XFS_BULKSTAT_VERSION_V5;

-	if (dic->di_version == 3) {
+	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
 		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
 			buf->bs_cowextsize_blks = dic->di_cowextsize;
 	}
@@ -60,6 +60,7 @@ typedef __u32 xfs_nlink_t;
 #include <linux/list_sort.h>
 #include <linux/ratelimit.h>
 #include <linux/rhashtable.h>
+#include <linux/xattr.h>

 #include <asm/page.h>
 #include <asm/div64.h>
@@ -163,32 +164,6 @@ struct xstats {

 extern struct xstats xfsstats;

-/* Kernel uid/gid conversion. These are used to convert to/from the on disk
- * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally.
- * The conversion here is type only, the value will remain the same since we
- * are converting to the init_user_ns. The uid is later mapped to a particular
- * user namespace value when crossing the kernel/user boundary.
- */
-static inline uint32_t xfs_kuid_to_uid(kuid_t uid)
-{
-	return from_kuid(&init_user_ns, uid);
-}
-
-static inline kuid_t xfs_uid_to_kuid(uint32_t uid)
-{
-	return make_kuid(&init_user_ns, uid);
-}
-
-static inline uint32_t xfs_kgid_to_gid(kgid_t gid)
-{
-	return from_kgid(&init_user_ns, gid);
-}
-
-static inline kgid_t xfs_gid_to_kgid(uint32_t gid)
-{
-	return make_kgid(&init_user_ns, gid);
-}
-
 static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
 {
 	return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
fs/xfs/xfs_log.c
@@ -47,8 +47,7 @@ xlog_dealloc_log(

 /* local state machine functions */
 STATIC void xlog_state_done_syncing(
-	struct xlog_in_core	*iclog,
-	bool			aborted);
+	struct xlog_in_core	*iclog);
 STATIC int
 xlog_state_get_iclog_space(
 	struct xlog		*log,
@@ -63,11 +62,6 @@ xlog_state_switch_iclogs(
 	struct xlog_in_core	*iclog,
 	int			eventual_size);
 STATIC void
-xlog_state_want_sync(
-	struct xlog		*log,
-	struct xlog_in_core	*iclog);
-
-STATIC void
 xlog_grant_push_ail(
 	struct xlog		*log,
 	int			need_bytes);
@@ -597,26 +591,21 @@ xlog_state_release_iclog(
 	return 0;
 }

-int
+void
 xfs_log_release_iclog(
-	struct xfs_mount	*mp,
 	struct xlog_in_core	*iclog)
 {
-	struct xlog		*log = mp->m_log;
-	bool			sync;
-
-	if (iclog->ic_state == XLOG_STATE_IOERROR) {
-		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
-		return -EIO;
-	}
+	struct xlog		*log = iclog->ic_log;
+	bool			sync = false;

 	if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
-		sync = __xlog_state_release_iclog(log, iclog);
+		if (iclog->ic_state != XLOG_STATE_IOERROR)
+			sync = __xlog_state_release_iclog(log, iclog);
 		spin_unlock(&log->l_icloglock);
-		if (sync)
-			xlog_sync(log, iclog);
 	}
-	return 0;
+
+	if (sync)
+		xlog_sync(log, iclog);
 }

 /*
@@ -854,6 +843,31 @@ xfs_log_mount_cancel(
 	xfs_log_unmount(mp);
 }

+/*
+ * Wait for the iclog to be written to disk, or return an error if the log
+ * has been shut down.
+ */
+static int
+xlog_wait_on_iclog(
+	struct xlog_in_core	*iclog)
+		__releases(iclog->ic_log->l_icloglock)
+{
+	struct xlog		*log = iclog->ic_log;
+
+	if (!XLOG_FORCED_SHUTDOWN(log) &&
+	    iclog->ic_state != XLOG_STATE_ACTIVE &&
+	    iclog->ic_state != XLOG_STATE_DIRTY) {
+		XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	} else {
+		spin_unlock(&log->l_icloglock);
+	}
+
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return -EIO;
+	return 0;
+}
+
 /*
  * Final log writes as part of unmount.
  *
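xlog_wait_on_iclog() centralizes a contract several callers below used to open-code: enter with the icloglock held, either sleep until the iclog I/O completes or drop the lock immediately, and re-check for shutdown afterwards since it can happen while sleeping. A pthread sketch of the same contract; note that pthread_cond_wait() reacquires the lock on wakeup, unlike the kernel's xlog_wait(), so the unlock here is explicit (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>

struct iclog_model {
	pthread_mutex_t	lock;
	pthread_cond_t	force_wait;
	bool		active_or_dirty;	/* nothing to wait for */
	bool		shutdown;
};

/* Called with ->lock held; always returns with it released. */
static int wait_on_iclog_model(struct iclog_model *ic)
{
	if (!ic->shutdown && !ic->active_or_dirty) {
		/* cond_wait drops the lock while sleeping... */
		while (!ic->active_or_dirty && !ic->shutdown)
			pthread_cond_wait(&ic->force_wait, &ic->lock);
	}
	pthread_mutex_unlock(&ic->lock);

	/* ...and a shutdown may have happened while we slept */
	return ic->shutdown ? -5 /* -EIO */ : 0;
}

int main(void)
{
	static struct iclog_model ic = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.force_wait = PTHREAD_COND_INITIALIZER,
		.active_or_dirty = true,
	};

	pthread_mutex_lock(&ic.lock);
	return wait_on_iclog_model(&ic);
}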
@@ -919,20 +933,13 @@ xfs_log_write_unmount_record(
 	spin_lock(&log->l_icloglock);
 	iclog = log->l_iclog;
 	atomic_inc(&iclog->ic_refcnt);
-	xlog_state_want_sync(log, iclog);
+	if (iclog->ic_state == XLOG_STATE_ACTIVE)
+		xlog_state_switch_iclogs(log, iclog, 0);
+	else
+		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+		       iclog->ic_state == XLOG_STATE_IOERROR);
 	error = xlog_state_release_iclog(log, iclog);
-	switch (iclog->ic_state) {
-	default:
-		if (!XLOG_FORCED_SHUTDOWN(log)) {
-			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-			break;
-		}
-		/* fall through */
-	case XLOG_STATE_ACTIVE:
-	case XLOG_STATE_DIRTY:
-		spin_unlock(&log->l_icloglock);
-		break;
-	}
+	xlog_wait_on_iclog(iclog);

 	if (tic) {
 		trace_xfs_log_umount_write(log, tic);
@@ -941,6 +948,18 @@ xfs_log_write_unmount_record(
 	}
 }

+static void
+xfs_log_unmount_verify_iclog(
+	struct xlog		*log)
+{
+	struct xlog_in_core	*iclog = log->l_iclog;
+
+	do {
+		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+		ASSERT(iclog->ic_offset == 0);
+	} while ((iclog = iclog->ic_next) != log->l_iclog);
+}
+
 /*
  * Unmount record used to have a string "Unmount filesystem--" in the
  * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
@@ -948,16 +967,11 @@ xfs_log_write_unmount_record(
  * currently architecture converted and "Unmount" is a bit foo.
  * As far as I know, there weren't any dependencies on the old behaviour.
  */
-static int
-xfs_log_unmount_write(xfs_mount_t *mp)
+static void
+xfs_log_unmount_write(
+	struct xfs_mount	*mp)
 {
-	struct xlog	 *log = mp->m_log;
-	xlog_in_core_t	 *iclog;
-#ifdef DEBUG
-	xlog_in_core_t	 *first_iclog;
-#endif
-	int		 error;
+	struct xlog		*log = mp->m_log;

 	/*
 	 * Don't write out unmount record on norecovery mounts or ro devices.
@@ -966,57 +980,16 @@ xfs_log_unmount_write(
 	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
 	    xfs_readonly_buftarg(log->l_targ)) {
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
-		return 0;
+		return;
 	}

-	error = xfs_log_force(mp, XFS_LOG_SYNC);
-	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
+	xfs_log_force(mp, XFS_LOG_SYNC);

-#ifdef DEBUG
-	first_iclog = iclog = log->l_iclog;
-	do {
-		if (iclog->ic_state != XLOG_STATE_IOERROR) {
-			ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
-			ASSERT(iclog->ic_offset == 0);
-		}
-		iclog = iclog->ic_next;
-	} while (iclog != first_iclog);
-#endif
-	if (! (XLOG_FORCED_SHUTDOWN(log))) {
-		xfs_log_write_unmount_record(mp);
-	} else {
-		/*
-		 * We're already in forced_shutdown mode, couldn't
-		 * even attempt to write out the unmount transaction.
-		 *
-		 * Go through the motions of sync'ing and releasing
-		 * the iclog, even though no I/O will actually happen,
-		 * we need to wait for other log I/Os that may already
-		 * be in progress. Do this as a separate section of
-		 * code so we'll know if we ever get stuck here that
-		 * we're in this odd situation of trying to unmount
-		 * a file system that went into forced_shutdown as
-		 * the result of an unmount..
-		 */
-		spin_lock(&log->l_icloglock);
-		iclog = log->l_iclog;
-		atomic_inc(&iclog->ic_refcnt);
-		xlog_state_want_sync(log, iclog);
-		error = xlog_state_release_iclog(log, iclog);
-		switch (iclog->ic_state) {
-		case XLOG_STATE_ACTIVE:
-		case XLOG_STATE_DIRTY:
-		case XLOG_STATE_IOERROR:
-			spin_unlock(&log->l_icloglock);
-			break;
-		default:
-			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-			break;
-		}
-	}
-
-	return error;
-} /* xfs_log_unmount_write */
+	if (XLOG_FORCED_SHUTDOWN(log))
+		return;
+	xfs_log_unmount_verify_iclog(log);
+	xfs_log_write_unmount_record(mp);
+}

 /*
  * Empty the log for unmount/freeze.
@@ -1279,7 +1252,6 @@ xlog_ioend_work(
 	struct xlog_in_core     *iclog =
 		container_of(work, struct xlog_in_core, ic_end_io_work);
 	struct xlog		*log = iclog->ic_log;
-	bool			aborted = false;
 	int			error;

 	error = blk_status_to_errno(iclog->ic_bio.bi_status);
@@ -1295,17 +1267,9 @@ xlog_ioend_work(
 	if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
 		xfs_alert(log->l_mp, "log I/O error %d", error);
 		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
-		/*
-		 * This flag will be propagated to the trans-committed
-		 * callback routines to let them know that the log-commit
-		 * didn't succeed.
-		 */
-		aborted = true;
-	} else if (iclog->ic_state == XLOG_STATE_IOERROR) {
-		aborted = true;
 	}

-	xlog_state_done_syncing(iclog, aborted);
+	xlog_state_done_syncing(iclog);
 	bio_uninit(&iclog->ic_bio);

 	/*
@@ -1739,7 +1703,7 @@ xlog_bio_end_io(
 		   &iclog->ic_end_io_work);
 }

-static void
+static int
 xlog_map_iclog_data(
 	struct bio		*bio,
 	void			*data,
@@ -1750,11 +1714,14 @@ xlog_map_iclog_data(
 		unsigned int	off = offset_in_page(data);
 		size_t		len = min_t(size_t, count, PAGE_SIZE - off);

-		WARN_ON_ONCE(bio_add_page(bio, page, len, off) != len);
+		if (bio_add_page(bio, page, len, off) != len)
+			return -EIO;
+
 		data += len;
 		count -= len;
 	} while (count);
+
+	return 0;
 }

 STATIC void
@@ -1784,7 +1751,7 @@ xlog_write_iclog(
 		 * the buffer manually, the code needs to be kept in sync
 		 * with the I/O completion path.
 		 */
-		xlog_state_done_syncing(iclog, true);
+		xlog_state_done_syncing(iclog);
 		up(&iclog->ic_sema);
 		return;
 	}
@@ -1798,7 +1765,10 @@ xlog_write_iclog(
 	if (need_flush)
 		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;

-	xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count);
+	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
+		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
+		return;
+	}
 	if (is_vmalloc_addr(iclog->ic_data))
 		flush_kernel_vmap_range(iclog->ic_data, count);
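Checking bio_add_page() turns what used to be a WARN and a silently short log write into a clean -EIO and a filesystem shutdown. Here is a userspace model of chunking a buffer page by page and refusing to continue once the container is full; the page size, segment limit and error codes are arbitrary stand-ins:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_MODEL	4096
#define MAX_SEGS	4

struct seg { char *addr; size_t len; };

struct bio_model {
	struct seg	segs[MAX_SEGS];
	int		nsegs;
};

/* Model of bio_add_page(): may accept less than asked for. */
static size_t bio_add_model(struct bio_model *bio, char *addr, size_t len)
{
	if (bio->nsegs == MAX_SEGS)
		return 0;
	bio->segs[bio->nsegs].addr = addr;
	bio->segs[bio->nsegs].len = len;
	bio->nsegs++;
	return len;
}

static int map_data(struct bio_model *bio, char *data, size_t count)
{
	do {
		size_t off = (uintptr_t)data % PAGE_SIZE_MODEL;
		size_t len = count < PAGE_SIZE_MODEL - off ?
					count : PAGE_SIZE_MODEL - off;

		/* fail the whole I/O instead of silently truncating it */
		if (bio_add_model(bio, data, len) != len)
			return -5;	/* -EIO */

		data += len;
		count -= len;
	} while (count);

	return 0;
}

int main(void)
{
	static char buf[6 * PAGE_SIZE_MODEL];
	struct bio_model bio = { .nsegs = 0 };

	if (map_data(&bio, buf, sizeof(buf)))
		fprintf(stderr, "mapping failed, shutting down log\n");
	return 0;
}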
@@ -2328,7 +2298,11 @@ xlog_write_copy_finish(
 		*record_cnt = 0;
 		*data_cnt = 0;

-		xlog_state_want_sync(log, iclog);
+		if (iclog->ic_state == XLOG_STATE_ACTIVE)
+			xlog_state_switch_iclogs(log, iclog, 0);
+		else
+			ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+			       iclog->ic_state == XLOG_STATE_IOERROR);
 		if (!commit_iclog)
 			goto release_iclog;
 		spin_unlock(&log->l_icloglock);
@ -2575,111 +2549,106 @@ xlog_write(
|
|||
*****************************************************************************
|
||||
*/
|
||||
|
||||
static void
|
||||
xlog_state_activate_iclog(
|
||||
struct xlog_in_core *iclog,
|
||||
int *iclogs_changed)
|
||||
{
|
||||
ASSERT(list_empty_careful(&iclog->ic_callbacks));
|
||||
|
||||
/*
|
||||
* If the number of ops in this iclog indicate it just contains the
|
||||
* dummy transaction, we can change state into IDLE (the second time
|
||||
* around). Otherwise we should change the state into NEED a dummy.
|
||||
* We don't need to cover the dummy.
|
||||
*/
|
||||
if (*iclogs_changed == 0 &&
|
||||
iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
|
||||
*iclogs_changed = 1;
|
||||
} else {
|
||||
/*
|
||||
* We have two dirty iclogs so start over. This could also be
|
||||
* num of ops indicating this is not the dummy going out.
|
||||
*/
|
||||
*iclogs_changed = 2;
|
||||
}
|
||||
|
||||
iclog->ic_state = XLOG_STATE_ACTIVE;
|
||||
iclog->ic_offset = 0;
|
||||
iclog->ic_header.h_num_logops = 0;
|
||||
memset(iclog->ic_header.h_cycle_data, 0,
|
||||
sizeof(iclog->ic_header.h_cycle_data));
|
||||
iclog->ic_header.h_lsn = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* An iclog has just finished IO completion processing, so we need to update
|
||||
* the iclog state and propagate that up into the overall log state. Hence we
|
||||
* prepare the iclog for cleaning, and then clean all the pending dirty iclogs
|
||||
* starting from the head, and then wake up any threads that are waiting for the
|
||||
* iclog to be marked clean.
|
||||
*
|
||||
* The ordering of marking iclogs ACTIVE must be maintained, so an iclog
|
||||
* doesn't become ACTIVE beyond one that is SYNCING. This is also required to
|
||||
* maintain the notion that we use a ordered wait queue to hold off would be
|
||||
* writers to the log when every iclog is trying to sync to disk.
|
||||
*
|
||||
* Caller must hold the icloglock before calling us.
|
||||
*
|
||||
* State Change: !IOERROR -> DIRTY -> ACTIVE
|
||||
* Loop through all iclogs and mark all iclogs currently marked DIRTY as
|
||||
* ACTIVE after iclog I/O has completed.
|
||||
*/
|
||||
static void
|
||||
xlog_state_activate_iclogs(
|
||||
struct xlog *log,
|
||||
int *iclogs_changed)
|
||||
{
|
||||
struct xlog_in_core *iclog = log->l_iclog;
|
||||
|
||||
do {
|
||||
if (iclog->ic_state == XLOG_STATE_DIRTY)
|
||||
xlog_state_activate_iclog(iclog, iclogs_changed);
|
||||
/*
|
||||
* The ordering of marking iclogs ACTIVE must be maintained, so
|
||||
* an iclog doesn't become ACTIVE beyond one that is SYNCING.
|
||||
*/
|
||||
else if (iclog->ic_state != XLOG_STATE_ACTIVE)
|
||||
break;
|
||||
} while ((iclog = iclog->ic_next) != log->l_iclog);
|
||||
}
|
||||
|
||||
static int
|
||||
xlog_covered_state(
|
||||
int prev_state,
|
||||
int iclogs_changed)
|
||||
{
|
||||
/*
|
||||
* We usually go to NEED. But we go to NEED2 if the changed indicates we
|
||||
* are done writing the dummy record. If we are done with the second
|
||||
* dummy recored (DONE2), then we go to IDLE.
|
||||
*/
|
||||
switch (prev_state) {
|
||||
case XLOG_STATE_COVER_IDLE:
|
||||
case XLOG_STATE_COVER_NEED:
|
||||
case XLOG_STATE_COVER_NEED2:
|
||||
break;
|
||||
case XLOG_STATE_COVER_DONE:
|
||||
if (iclogs_changed == 1)
|
||||
return XLOG_STATE_COVER_NEED2;
|
||||
break;
|
||||
case XLOG_STATE_COVER_DONE2:
|
||||
if (iclogs_changed == 1)
|
||||
return XLOG_STATE_COVER_IDLE;
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
}
|
||||
|
||||
return XLOG_STATE_COVER_NEED;
|
||||
}
STATIC void
xlog_state_clean_iclog(
	struct xlog *log,
	struct xlog_in_core *dirty_iclog)
{
	struct xlog_in_core *iclog;
	int changed = 0;
	int iclogs_changed = 0;

	/* Prepare the completed iclog. */
	if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
		dirty_iclog->ic_state = XLOG_STATE_DIRTY;

	/* Walk all the iclogs to update the ordered active state. */
	iclog = log->l_iclog;
	do {
		if (iclog->ic_state == XLOG_STATE_DIRTY) {
			iclog->ic_state = XLOG_STATE_ACTIVE;
			iclog->ic_offset = 0;
			ASSERT(list_empty_careful(&iclog->ic_callbacks));
			/*
			 * If the number of ops in this iclog indicate it just
			 * contains the dummy transaction, we can
			 * change state into IDLE (the second time around).
			 * Otherwise we should change the state into
			 * NEED a dummy.
			 * We don't need to cover the dummy.
			 */
			if (!changed &&
			    (be32_to_cpu(iclog->ic_header.h_num_logops) ==
					XLOG_COVER_OPS)) {
				changed = 1;
			} else {
				/*
				 * We have two dirty iclogs so start over
				 * This could also be num of ops indicates
				 * this is not the dummy going out.
				 */
				changed = 2;
			}
			iclog->ic_header.h_num_logops = 0;
			memset(iclog->ic_header.h_cycle_data, 0,
				sizeof(iclog->ic_header.h_cycle_data));
			iclog->ic_header.h_lsn = 0;
		} else if (iclog->ic_state == XLOG_STATE_ACTIVE)
			/* do nothing */;
		else
			break;	/* stop cleaning */
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);

	/*
	 * Wake up threads waiting in xfs_log_force() for the dirty iclog
	 * to be cleaned.
	 */
	xlog_state_activate_iclogs(log, &iclogs_changed);
	wake_up_all(&dirty_iclog->ic_force_wait);

	/*
	 * Change state for the dummy log recording.
	 * We usually go to NEED. But we go to NEED2 if the changed indicates
	 * we are done writing the dummy record.
	 * If we are done with the second dummy record (DONE2), then
	 * we go to IDLE.
	 */
	if (changed) {
		switch (log->l_covered_state) {
		case XLOG_STATE_COVER_IDLE:
		case XLOG_STATE_COVER_NEED:
		case XLOG_STATE_COVER_NEED2:
			log->l_covered_state = XLOG_STATE_COVER_NEED;
			break;

		case XLOG_STATE_COVER_DONE:
			if (changed == 1)
				log->l_covered_state = XLOG_STATE_COVER_NEED2;
			else
				log->l_covered_state = XLOG_STATE_COVER_NEED;
			break;

		case XLOG_STATE_COVER_DONE2:
			if (changed == 1)
				log->l_covered_state = XLOG_STATE_COVER_IDLE;
			else
				log->l_covered_state = XLOG_STATE_COVER_NEED;
			break;

		default:
			ASSERT(0);
		}
	if (iclogs_changed) {
		log->l_covered_state = xlog_covered_state(log->l_covered_state,
				iclogs_changed);
	}
}
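The iclogs live on a circular list threaded through ic_next, which is why the walkers above use a do/while loop that stops once it comes back around to l_iclog. A minimal userspace sketch of that ring walk and its early-break ordering rule (all names here are illustrative, not the kernel's):

#include <stdio.h>

/* Simplified iclog states for this sketch. */
enum state { ACTIVE, WANT_SYNC, SYNCING, DONE_SYNC, DIRTY };

struct iclog {
	enum state state;
	struct iclog *next;	/* circular, like ic_next */
};

/*
 * Walk the ring from the head, reactivating DIRTY iclogs, and stop at
 * the first iclog that is neither DIRTY nor ACTIVE -- the same early
 * break that keeps an iclog from going ACTIVE "past" one still SYNCING.
 */
static void activate_iclogs(struct iclog *head, int *changed)
{
	struct iclog *ic = head;

	do {
		if (ic->state == DIRTY) {
			ic->state = ACTIVE;
			*changed += 1;
		} else if (ic->state != ACTIVE) {
			break;	/* preserve ordering */
		}
	} while ((ic = ic->next) != head);
}

int main(void)
{
	/* A four-iclog ring: DIRTY, DIRTY, SYNCING, DIRTY. */
	struct iclog ring[4] = { { DIRTY }, { DIRTY }, { SYNCING }, { DIRTY } };
	int i, changed = 0;

	for (i = 0; i < 4; i++)
		ring[i].next = &ring[(i + 1) % 4];

	activate_iclogs(&ring[0], &changed);
	/* Only the first two flip to ACTIVE; the one after SYNCING stays DIRTY. */
	for (i = 0; i < 4; i++)
		printf("iclog %d: %s\n", i,
		       ring[i].state == ACTIVE ? "ACTIVE" : "not active");
	printf("changed: %d\n", changed);
	return 0;
}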

@@ -2808,8 +2777,7 @@ xlog_state_iodone_process_iclog(
static void
xlog_state_do_iclog_callbacks(
	struct xlog *log,
	struct xlog_in_core *iclog,
	bool aborted)
	struct xlog_in_core *iclog)
	__releases(&log->l_icloglock)
	__acquires(&log->l_icloglock)
{

@@ -2821,7 +2789,7 @@ xlog_state_do_iclog_callbacks(
	list_splice_init(&iclog->ic_callbacks, &tmp);

	spin_unlock(&iclog->ic_callback_lock);
	xlog_cil_process_committed(&tmp, aborted);
	xlog_cil_process_committed(&tmp);
	spin_lock(&iclog->ic_callback_lock);
}

@@ -2836,8 +2804,7 @@ xlog_state_do_iclog_callbacks(

STATIC void
xlog_state_do_callback(
	struct xlog *log,
	bool aborted)
	struct xlog *log)
{
	struct xlog_in_core *iclog;
	struct xlog_in_core *first_iclog;

@@ -2878,9 +2845,11 @@ xlog_state_do_callback(
		 * we'll have to run at least one more complete loop.
		 */
		cycled_icloglock = true;
		xlog_state_do_iclog_callbacks(log, iclog, aborted);

		xlog_state_clean_iclog(log, iclog);
		xlog_state_do_iclog_callbacks(log, iclog);
		if (XLOG_FORCED_SHUTDOWN(log))
			wake_up_all(&iclog->ic_force_wait);
		else
			xlog_state_clean_iclog(log, iclog);
		iclog = iclog->ic_next;
	} while (first_iclog != iclog);

@@ -2916,25 +2885,22 @@ xlog_state_do_callback(
 */
STATIC void
xlog_state_done_syncing(
	struct xlog_in_core *iclog,
	bool aborted)
	struct xlog_in_core *iclog)
{
	struct xlog *log = iclog->ic_log;

	spin_lock(&log->l_icloglock);

	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	/*
	 * If we got an error, either on the first buffer, or in the case of
	 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
	 * and none should ever be attempted to be written to disk
	 * again.
	 * split log writes, on the second, we shut down the file system and
	 * no iclogs should ever be attempted to be written to disk again.
	 */
	if (iclog->ic_state == XLOG_STATE_SYNCING)
	if (!XLOG_FORCED_SHUTDOWN(log)) {
		ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
		iclog->ic_state = XLOG_STATE_DONE_SYNC;
	else
		ASSERT(iclog->ic_state == XLOG_STATE_IOERROR);
	}

	/*
	 * Someone could be sleeping prior to writing out the next

@@ -2943,9 +2909,8 @@ xlog_state_done_syncing(
	 */
	wake_up_all(&iclog->ic_write_wait);
	spin_unlock(&log->l_icloglock);
	xlog_state_do_callback(log, aborted);	/* also cleans log */
}	/* xlog_state_done_syncing */

	xlog_state_do_callback(log);	/* also cleans log */
}

/*
 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must

@@ -3152,11 +3117,12 @@ xlog_ungrant_log_space(
}

/*
 * This routine will mark the current iclog in the ring as WANT_SYNC
 * and move the current iclog pointer to the next iclog in the ring.
 * When this routine is called from xlog_state_get_iclog_space(), the
 * exact size of the iclog has not yet been determined. All we know is
 * that every data block. We have run out of space in this log record.
 * Mark the current iclog in the ring as WANT_SYNC and move the current iclog
 * pointer to the next iclog in the ring.
 *
 * When called from xlog_state_get_iclog_space(), the exact size of the iclog
 * has not yet been determined, all we know is that we have run out of space in
 * the current iclog.
 */
STATIC void
xlog_state_switch_iclogs(

@@ -3165,6 +3131,8 @@ xlog_state_switch_iclogs(
	int eventual_size)
{
	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
	assert_spin_locked(&log->l_icloglock);

	if (!eventual_size)
		eventual_size = iclog->ic_offset;
	iclog->ic_state = XLOG_STATE_WANT_SYNC;

@@ -3259,9 +3227,6 @@ xfs_log_force(
		 * previous iclog and go to sleep.
		 */
		iclog = iclog->ic_prev;
		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
		    iclog->ic_state == XLOG_STATE_DIRTY)
			goto out_unlock;
	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
		if (atomic_read(&iclog->ic_refcnt) == 0) {
			/*

@@ -3277,8 +3242,7 @@ xfs_log_force(
			if (xlog_state_release_iclog(log, iclog))
				goto out_error;

			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
			    iclog->ic_state == XLOG_STATE_DIRTY)
			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
				goto out_unlock;
		} else {
			/*

@@ -3298,17 +3262,8 @@ xfs_log_force(
			;
	}

	if (!(flags & XFS_LOG_SYNC))
		goto out_unlock;

	if (iclog->ic_state == XLOG_STATE_IOERROR)
		goto out_error;
	XFS_STATS_INC(mp, xs_log_force_sleep);
	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	if (iclog->ic_state == XLOG_STATE_IOERROR)
		return -EIO;
	return 0;

	if (flags & XFS_LOG_SYNC)
		return xlog_wait_on_iclog(iclog);
out_unlock:
	spin_unlock(&log->l_icloglock);
	return 0;

@@ -3339,9 +3294,6 @@ __xfs_log_force_lsn(
		goto out_unlock;
	}

	if (iclog->ic_state == XLOG_STATE_DIRTY)
		goto out_unlock;

	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
		/*
		 * We sleep here if we haven't already slept (e.g. this is the

@@ -3375,20 +3327,8 @@ __xfs_log_force_lsn(
			*log_flushed = 1;
	}

	if (!(flags & XFS_LOG_SYNC) ||
	    (iclog->ic_state == XLOG_STATE_ACTIVE ||
	     iclog->ic_state == XLOG_STATE_DIRTY))
		goto out_unlock;

	if (iclog->ic_state == XLOG_STATE_IOERROR)
		goto out_error;

	XFS_STATS_INC(mp, xs_log_force_sleep);
	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	if (iclog->ic_state == XLOG_STATE_IOERROR)
		return -EIO;
	return 0;

	if (flags & XFS_LOG_SYNC)
		return xlog_wait_on_iclog(iclog);
out_unlock:
	spin_unlock(&log->l_icloglock);
	return 0;

@@ -3434,26 +3374,6 @@ xfs_log_force_lsn(
	return ret;
}

/*
 * Called when we want to mark the current iclog as being ready to sync to
 * disk.
 */
STATIC void
xlog_state_want_sync(
	struct xlog *log,
	struct xlog_in_core *iclog)
{
	assert_spin_locked(&log->l_icloglock);

	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
		xlog_state_switch_iclogs(log, iclog, 0);
	} else {
		ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
		       iclog->ic_state == XLOG_STATE_IOERROR);
	}
}

/*****************************************************************************
 *
 * TICKET functions

@@ -3937,7 +3857,7 @@ xfs_log_force_umount(
	spin_lock(&log->l_cilp->xc_push_lock);
	wake_up_all(&log->l_cilp->xc_commit_wait);
	spin_unlock(&log->l_cilp->xc_push_lock);
	xlog_state_do_callback(log, true);
	xlog_state_do_callback(log);

	/* return non-zero if log IOERROR transition had already happened */
	return retval;

@@ -121,8 +121,7 @@ void xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void xfs_log_space_wake(struct xfs_mount *mp);
int xfs_log_release_iclog(struct xfs_mount *mp,
		struct xlog_in_core *iclog);
void xfs_log_release_iclog(struct xlog_in_core *iclog);
int xfs_log_reserve(struct xfs_mount *mp,
		int length,
		int count,

@@ -138,7 +137,7 @@ void xfs_log_ticket_put(struct xlog_ticket *ticket);

void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
		xfs_lsn_t *commit_lsn, bool regrant);
void xlog_cil_process_committed(struct list_head *list, bool aborted);
void xlog_cil_process_committed(struct list_head *list);
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void xfs_log_work_queue(struct xfs_mount *mp);

@@ -574,10 +574,10 @@ xlog_discard_busy_extents(
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx *ctx,
	bool abort)
	struct xfs_cil_ctx *ctx)
{
	struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
	bool abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.

@@ -613,37 +613,38 @@ xlog_cil_committed(

void
xlog_cil_process_committed(
	struct list_head *list,
	bool aborted)
	struct list_head *list)
{
	struct xfs_cil_ctx *ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx, aborted);
		xlog_cil_committed(ctx);
	}
}

/*
 * Push the Committed Item List to the log. If @push_seq flag is zero, then it
 * is a background flush and so we can chose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allows log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog *log)
static void
xlog_cil_push_work(
	struct work_struct *work)
{
	struct xfs_cil *cil = log->l_cilp;
	struct xfs_cil *cil =
		container_of(work, struct xfs_cil, xc_push_work);
	struct xlog *log = cil->xc_log;
	struct xfs_log_vec *lv;
	struct xfs_cil_ctx *ctx;
	struct xfs_cil_ctx *new_ctx;

@@ -657,9 +658,6 @@ xlog_cil_push(
	xfs_lsn_t commit_lsn;
	xfs_lsn_t push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

@@ -867,28 +865,20 @@ xlog_cil_push(
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);
	xfs_log_release_iclog(commit_iclog);
	return;

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;
	return;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, true);
	return -EIO;
}

static void
xlog_cil_push_work(
	struct work_struct *work)
{
	struct xfs_cil *cil = container_of(work, struct xfs_cil,
					xc_push_work);
	xlog_cil_push(cil->xc_log);
	ASSERT(XLOG_FORCED_SHUTDOWN(log));
	xlog_cil_committed(ctx);
}
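Folding xlog_cil_push() into xlog_cil_push_work() works because the work item is embedded in struct xfs_cil, so container_of() can recover the CIL from the bare work_struct pointer. A self-contained userspace sketch of that pattern (toy types; only the offsetof arithmetic matches the kernel macro):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);
};

/* Toy CIL: the work item is embedded, like xc_push_work in struct xfs_cil. */
struct cil {
	int sequence;
	struct work_struct push_work;
};

static void push_work_fn(struct work_struct *work)
{
	/* Recover the containing object from the embedded member. */
	struct cil *cil = container_of(work, struct cil, push_work);

	printf("pushing CIL sequence %d\n", cil->sequence);
}

int main(void)
{
	struct cil c = { .sequence = 42, .push_work = { .func = push_work_fn } };

	/* A real workqueue would make this call; we invoke it directly. */
	c.push_work.func(&c.push_work);
	return 0;
}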

/*

@@ -402,7 +402,8 @@ struct xlog {
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))

#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
#define XLOG_FORCED_SHUTDOWN(log) \
	(unlikely((log)->l_flags & XLOG_IO_ERROR))
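The only change to XLOG_FORCED_SHUTDOWN() is the unlikely() annotation, which expands to a __builtin_expect() hint so compilers lay out the shutdown branch as the cold path. A hedged userspace equivalent (the struct and flag name below are made up for the demo):

#include <stdio.h>

/*
 * Userspace versions of the kernel's likely()/unlikely() annotations:
 * both are thin wrappers around the GCC/Clang __builtin_expect() hint.
 */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

#define LOG_IO_ERROR	(1 << 0)

struct log { unsigned int flags; };

/* Same shape as the reworked XLOG_FORCED_SHUTDOWN() macro above. */
#define forced_shutdown(log)	(unlikely((log)->flags & LOG_IO_ERROR))

int main(void)
{
	struct log l = { .flags = 0 };

	if (forced_shutdown(&l))
		puts("shut down");	/* cold path */
	else
		puts("running");	/* hot path */
	return 0;
}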

/* common routines */
extern int

@@ -524,12 +525,6 @@ xlog_cil_force(struct xlog *log)
	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
}

/*
 * Unmount record type is used as a pseudo transaction type for the ticket.
 * It's value must be outside the range of XFS_TRANS_* values.
 */
#define XLOG_UNMOUNT_REC_TYPE (-1U)

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the

@@ -2869,8 +2869,8 @@ xfs_recover_inode_owner_change(
		return -ENOMEM;

	/* instantiate the inode */
	ASSERT(dip->di_version >= 3);
	xfs_inode_from_disk(ip, dip);
	ASSERT(ip->i_d.di_version >= 3);

	error = xfs_iformat_fork(ip, dip);
	if (error)

@@ -2997,7 +2997,7 @@ xlog_recover_inode_pass2(
	 * superblock flag to determine whether we need to look at di_flushiter
	 * to skip replay when the on disk inode is newer than the log one
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
	if (!xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less

@@ -3068,7 +3068,7 @@ xlog_recover_inode_pass2(
		error = -EFSCORRUPTED;
		goto out_release;
	}
	isize = xfs_log_dinode_size(ldip->di_version);
	isize = xfs_log_dinode_size(mp);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				XFS_ERRLEVEL_LOW, mp, ldip,

@@ -4947,7 +4947,7 @@ xlog_recover_clear_agi_bucket(
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi = agibp->b_addr;
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket);

@@ -5083,7 +5083,7 @@ xlog_recover_process_iunlinks(
	 * buffer reference though, so that it stays pinned in memory
	 * while we need the buffer.
	 */
	agi = XFS_BUF_TO_AGI(agibp);
	agi = agibp->b_addr;
	xfs_buf_unlock(agibp);

	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {

@@ -5636,7 +5636,7 @@ xlog_do_recover(

	/* Convert superblock from on-disk format */
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_sb_from_disk(sbp, bp->b_addr);
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */

@@ -5809,7 +5809,6 @@ xlog_recover_check_summary(
	struct xlog *log)
{
	xfs_mount_t *mp;
	xfs_agf_t *agfp;
	xfs_buf_t *agfbp;
	xfs_buf_t *agibp;
	xfs_agnumber_t agno;

@@ -5829,7 +5828,8 @@ xlog_recover_check_summary(
			xfs_alert(mp, "%s agf read failed agno %d error %d",
					__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			struct xfs_agf *agfp = agfbp->b_addr;

			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);

@@ -5840,7 +5840,7 @@ xlog_recover_check_summary(
			xfs_alert(mp, "%s agi read failed agno %d error %d",
					__func__, agno, error);
		} else {
			struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
			struct xfs_agi *agi = agibp->b_addr;

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);

@@ -310,7 +310,7 @@ xfs_readsb(
	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	xfs_sb_from_disk(sbp, bp->b_addr);

	/*
	 * If we haven't validated the superblock, do so now before we try

@@ -326,16 +326,16 @@ xfs_qm_dqattach_locked(
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
				doalloc, &ip->i_udquot);
		error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
				XFS_DQ_USER, doalloc, &ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
				doalloc, &ip->i_gdquot);
		error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
				XFS_DQ_GROUP, doalloc, &ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);

@@ -871,12 +871,20 @@ xfs_qm_reset_dqcounts(
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		/*
		 * dquot id 0 stores the default grace period and the maximum
		 * warning limit that were set by the administrator, so we
		 * should not reset them.
		 */
		if (ddq->d_id != 0) {
			ddq->d_btimer = 0;
			ddq->d_itimer = 0;
			ddq->d_rtbtimer = 0;
			ddq->d_bwarns = 0;
			ddq->d_iwarns = 0;
			ddq->d_rtbwarns = 0;
		}
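This hunk is the quotacheck fix called out in the merge summary: dquot id 0 is where the administrator's default grace periods and warning limits live, so a blanket counter reset must not touch them. A small userspace model of the rule (field names simplified; the real record is struct xfs_disk_dquot):

#include <stdio.h>

/* Toy on-disk dquot record: counters plus admin-tunable limits. */
struct dquot {
	unsigned int id;
	long long bcount;	/* usage counter: always reset */
	long long btimer;	/* grace period: the default lives in id 0 */
	long long bwarns;	/* warning limit: the default lives in id 0 */
};

/*
 * Same idea as the hunk above: quotacheck may zero usage counters
 * everywhere, but the timers and warn limits in record 0 are the
 * administrator's defaults and must survive.
 */
static void reset_dqcounts(struct dquot *dq)
{
	dq->bcount = 0;
	if (dq->id != 0) {
		dq->btimer = 0;
		dq->bwarns = 0;
	}
}

int main(void)
{
	struct dquot defaults = { .id = 0, .bcount = 7, .btimer = 604800, .bwarns = 5 };
	struct dquot user42 = { .id = 42, .bcount = 9, .btimer = 100, .bwarns = 1 };

	reset_dqcounts(&defaults);
	reset_dqcounts(&user42);
	printf("id 0: timer=%lld warns=%lld (preserved)\n",
	       defaults.btimer, defaults.bwarns);
	printf("id 42: timer=%lld warns=%lld (reset)\n",
	       user42.btimer, user42.bwarns);
	return 0;
}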

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],

@@ -1613,8 +1621,8 @@ xfs_qm_dqfree_one(
int
xfs_qm_vop_dqalloc(
	struct xfs_inode *ip,
	xfs_dqid_t uid,
	xfs_dqid_t gid,
	kuid_t uid,
	kgid_t gid,
	prid_t prid,
	uint flags,
	struct xfs_dquot **O_udqpp,

@@ -1622,6 +1630,8 @@ xfs_qm_vop_dqalloc(
	struct xfs_dquot **O_pdqpp)
{
	struct xfs_mount *mp = ip->i_mount;
	struct inode *inode = VFS_I(ip);
	struct user_namespace *user_ns = inode->i_sb->s_user_ns;
	struct xfs_dquot *uq = NULL;
	struct xfs_dquot *gq = NULL;
	struct xfs_dquot *pq = NULL;

@@ -1635,7 +1645,7 @@ xfs_qm_vop_dqalloc(
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;
		gid = inode->i_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation

@@ -1650,7 +1660,7 @@ xfs_qm_vop_dqalloc(
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
		if (!uid_eq(inode->i_uid, uid)) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode

@@ -1661,7 +1671,8 @@ xfs_qm_vop_dqalloc(
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq);
			error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
					XFS_DQ_USER, true, &uq);
			if (error) {
				ASSERT(error != -ENOENT);
				return error;

@@ -1682,9 +1693,10 @@ xfs_qm_vop_dqalloc(
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
		if (!gid_eq(inode->i_gid, gid)) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq);
			error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
					XFS_DQ_GROUP, true, &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;

@@ -1810,7 +1822,7 @@ xfs_qm_vop_chown_reserve(
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
	    i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to

@@ -1823,7 +1835,7 @@ xfs_qm_vop_chown_reserve(
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
	    i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);

@@ -1920,14 +1932,15 @@ xfs_qm_vop_create_dqattach(

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
		ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}

@@ -29,8 +29,6 @@ xfs_qm_log_quotaoff(
	int error;
	struct xfs_qoff_logitem *qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

@@ -62,7 +60,7 @@ xfs_qm_log_quotaoff(
STATIC int
xfs_qm_log_quotaoff_end(
	struct xfs_mount *mp,
	struct xfs_qoff_logitem *startqoff,
	struct xfs_qoff_logitem **startqoff,
	uint flags)
{
	struct xfs_trans *tp;

@@ -73,9 +71,10 @@ xfs_qm_log_quotaoff_end(
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
	qoffi = xfs_trans_get_qoff_item(tp, *startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);
	*startqoff = NULL;

	/*
	 * We have to make sure that the transaction is secure on disk before we

@@ -103,7 +102,7 @@ xfs_qm_scall_quotaoff(
	uint dqtype;
	int error;
	uint inactivate_flags;
	struct xfs_qoff_logitem *qoffstart;
	struct xfs_qoff_logitem *qoffstart = NULL;

	/*
	 * No file system can have quotas enabled on disk but not in core.

@@ -228,7 +227,7 @@ xfs_qm_scall_quotaoff(
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
	error = xfs_qm_log_quotaoff_end(mp, &qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

@@ -261,6 +260,8 @@ xfs_qm_scall_quotaoff(
	}

out_unlock:
	if (error && qoffstart)
		xfs_qm_qoff_logitem_relse(qoffstart);
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
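The quotaoff fix is an ownership-handoff pattern: the end-record helper now takes a pointer-to-pointer so it can NULL out the caller's reference once the start item has been consumed, and the single out_unlock path releases the item only when it is still owned -- the piece that previously leaked (and hung) when the fs went down mid-quotaoff. A userspace sketch of the shape of that fix (malloc/free stand in for the log item lifetime; error numbers are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct logitem { int id; };

/*
 * Consume the caller's item on success and NULL out its pointer,
 * mirroring how xfs_qm_log_quotaoff_end() now takes **startqoff.
 */
static int log_quotaoff_end(struct logitem **start, int fail)
{
	if (fail)
		return -5;	/* simulated I/O error: caller still owns *start */
	printf("logged end item for start %d\n", (*start)->id);
	free(*start);
	*start = NULL;		/* ownership transferred */
	return 0;
}

static int scall_quotaoff(int fail)
{
	struct logitem *qoffstart = malloc(sizeof(*qoffstart));
	int error;

	if (!qoffstart)
		return -12;
	qoffstart->id = 1;

	error = log_quotaoff_end(&qoffstart, fail);
	/* Unified unwind: release the start item only if it was not consumed. */
	if (error && qoffstart) {
		free(qoffstart);
		printf("released leaked start item\n");
	}
	return error;
}

int main(void)
{
	scall_quotaoff(0);	/* success: item consumed by the end record */
	scall_quotaoff(1);	/* failure: unwind path frees it, no leak */
	return 0;
}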

@@ -86,7 +86,7 @@ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
		struct xfs_mount *, struct xfs_dquot *,
		struct xfs_dquot *, struct xfs_dquot *, int64_t, long, uint);

extern int xfs_qm_vop_dqalloc(struct xfs_inode *, xfs_dqid_t, xfs_dqid_t,
extern int xfs_qm_vop_dqalloc(struct xfs_inode *, kuid_t, kgid_t,
		prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
		struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,

@@ -109,7 +109,7 @@ extern void xfs_qm_unmount_quotas(struct xfs_mount *);

#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid,
xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid,
		prid_t prid, uint flags, struct xfs_dquot **udqp,
		struct xfs_dquot **gdqp, struct xfs_dquot **pdqp)
{

@@ -57,13 +57,13 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += snprintf(buf + len, PATH_MAX - len, "%s",
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += snprintf(buf + len, PATH_MAX - len, " %u",
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += snprintf(buf + len, PATH_MAX - len, "\n");
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
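The snprintf() -> scnprintf() conversion matters because snprintf() returns the length the output *would* have had, so accumulating its return value can push len past the end of the buffer and turn the next buf + len into an out-of-bounds write. scnprintf() returns the number of bytes actually stored. A userspace model (this scnprintf() is a local reimplementation for the demo, not the kernel's):

#include <stdarg.h>
#include <stdio.h>

/*
 * Userspace model of the kernel's scnprintf(): clamp the return value
 * to the number of bytes actually written, unlike snprintf().
 */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;
	if ((size_t)ret >= size)
		return size ? (int)size - 1 : 0;
	return ret;
}

int main(void)
{
	char buf[8];
	int len = 0;

	/*
	 * With plain snprintf() this would leave len = 26 and make the
	 * second "buf + len" point past the buffer; scnprintf() keeps
	 * len within bounds (len stays 7 here).
	 */
	len += scnprintf(buf + len, sizeof(buf) - len, "%s", "abcdefghijklm");
	len += scnprintf(buf + len, sizeof(buf) - len, "%s", "nopqrstuvwxyz");
	printf("len=%d buf=%s\n", len, buf);
	return 0;
}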
	/* extra precision counters */
	for_each_possible_cpu(i) {

@@ -72,9 +72,9 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
	}

	len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
	len += scnprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += snprintf(buf + len, PATH_MAX-len, "debug %u\n",
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
	1);
#else

@@ -182,9 +182,7 @@ xfs_symlink(
	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)

@@ -194,7 +192,7 @@ xfs_symlink(
	 * The symlink will fit into the inode data fork?
	 * There can't be any attributes so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
	if (pathlen <= XFS_LITINO(mp))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);

@@ -6,6 +6,7 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"

@@ -27,6 +28,7 @@
#include "xfs_log_recover.h"
#include "xfs_filestream.h"
#include "xfs_fsmap.h"
#include "xfs_btree_staging.h"

/*
 * We include this last to have the helpers above available for the trace

@@ -35,6 +35,12 @@ struct xfs_icreate_log;
struct xfs_owner_info;
struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;

#define XFS_ATTR_FILTER_FLAGS \
	{ XFS_ATTR_ROOT, "ROOT" }, \
	{ XFS_ATTR_SECURE, "SECURE" }, \
	{ XFS_ATTR_INCOMPLETE, "INCOMPLETE" }
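XFS_ATTR_FILTER_FLAGS is a { value, "name" } table consumed by __print_flags() in the TP_printk() formats below, turning a bit mask into a ROOT|SECURE|... string. The same decode in plain C (the flag values are invented for this sketch):

#include <stdio.h>

#define ATTR_ROOT	(1 << 0)
#define ATTR_SECURE	(1 << 1)
#define ATTR_INCOMPLETE	(1 << 2)

/* The same { value, name } pair table the tracepoint macro expands to. */
static const struct { unsigned int bit; const char *name; } attr_flag_names[] = {
	{ ATTR_ROOT,		"ROOT" },
	{ ATTR_SECURE,		"SECURE" },
	{ ATTR_INCOMPLETE,	"INCOMPLETE" },
};

/* Decode a mask into "A|B|C", roughly what __print_flags() emits. */
static void print_flags(unsigned int mask)
{
	const char *sep = "";
	size_t i;

	for (i = 0; i < sizeof(attr_flag_names) / sizeof(attr_flag_names[0]); i++) {
		if (mask & attr_flag_names[i].bit) {
			printf("%s%s", sep, attr_flag_names[i].name);
			sep = "|";
		}
	}
	putchar('\n');
}

int main(void)
{
	print_flags(ATTR_ROOT | ATTR_INCOMPLETE);	/* ROOT|INCOMPLETE */
	return 0;
}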

DECLARE_EVENT_CLASS(xfs_attr_list_class,
	TP_PROTO(struct xfs_attr_list_context *ctx),

@@ -45,39 +51,39 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(void *, buffer)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
		__field(unsigned int, attr_filter)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->hashval = ctx->cursor.hashval;
		__entry->blkno = ctx->cursor.blkno;
		__entry->offset = ctx->cursor.offset;
		__entry->buffer = ctx->buffer;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
		__entry->attr_filter = ctx->attr_filter;
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "alist %p size %u count %u firstu %u flags %d %s",
		  "buffer %p size %u count %u firstu %u filter %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->alist,
		  __entry->buffer,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __entry->flags,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
		  __print_flags(__entry->attr_filter, "|",
				XFS_ATTR_FILTER_FLAGS)
	)
)

@@ -169,31 +175,31 @@ TRACE_EVENT(xfs_attr_list_node_descend,
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(void *, buffer)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
		__field(unsigned int, attr_filter)
		__field(u32, bt_hashval)
		__field(u32, bt_before)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->hashval = ctx->cursor.hashval;
		__entry->blkno = ctx->cursor.blkno;
		__entry->offset = ctx->cursor.offset;
		__entry->buffer = ctx->buffer;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
		__entry->attr_filter = ctx->attr_filter;
		__entry->bt_hashval = be32_to_cpu(btree->hashval);
		__entry->bt_before = be32_to_cpu(btree->before);
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "alist %p size %u count %u firstu %u flags %d %s "
		  "buffer %p size %u count %u firstu %u filter %s "
		  "node hashval %u, node before %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,

@@ -201,12 +207,12 @@ TRACE_EVENT(xfs_attr_list_node_descend,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->alist,
		  __entry->buffer,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __entry->flags,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
		  __print_flags(__entry->attr_filter, "|",
				XFS_ATTR_FILTER_FLAGS),
		  __entry->bt_hashval,
		  __entry->bt_before)
);

@@ -1701,7 +1707,8 @@ DECLARE_EVENT_CLASS(xfs_attr_class,
		__field(int, namelen)
		__field(int, valuelen)
		__field(xfs_dahash_t, hashval)
		__field(int, flags)
		__field(unsigned int, attr_filter)
		__field(unsigned int, attr_flags)
		__field(int, op_flags)
	),
	TP_fast_assign(

@@ -1712,11 +1719,12 @@ DECLARE_EVENT_CLASS(xfs_attr_class,
		__entry->namelen = args->namelen;
		__entry->valuelen = args->valuelen;
		__entry->hashval = args->hashval;
		__entry->flags = args->flags;
		__entry->attr_filter = args->attr_filter;
		__entry->attr_flags = args->attr_flags;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d valuelen %d "
		  "hashval 0x%x flags %s op_flags %s",
		  "hashval 0x%x filter %s flags %s op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,

@@ -1724,7 +1732,11 @@ DECLARE_EVENT_CLASS(xfs_attr_class,
		  __entry->namelen,
		  __entry->valuelen,
		  __entry->hashval,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
		  __print_flags(__entry->attr_filter, "|",
				XFS_ATTR_FILTER_FLAGS),
		  __print_flags(__entry->attr_flags, "|",
				{ XATTR_CREATE, "CREATE" },
				{ XATTR_REPLACE, "REPLACE" }),
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)

@@ -3594,6 +3606,151 @@ TRACE_EVENT(xfs_check_new_dalign,
		  __entry->calc_rootino)
)

TRACE_EVENT(xfs_btree_commit_afakeroot,
	TP_PROTO(struct xfs_btree_cur *cur),
	TP_ARGS(cur),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_btnum_t, btnum)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(unsigned int, levels)
		__field(unsigned int, blocks)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->btnum = cur->bc_btnum;
		__entry->agno = cur->bc_ag.agno;
		__entry->agbno = cur->bc_ag.afake->af_root;
		__entry->levels = cur->bc_ag.afake->af_levels;
		__entry->blocks = cur->bc_ag.afake->af_blocks;
	),
	TP_printk("dev %d:%d btree %s ag %u levels %u blocks %u root %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
		  __entry->agno,
		  __entry->levels,
		  __entry->blocks,
		  __entry->agbno)
)

TRACE_EVENT(xfs_btree_commit_ifakeroot,
	TP_PROTO(struct xfs_btree_cur *cur),
	TP_ARGS(cur),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_btnum_t, btnum)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(unsigned int, levels)
		__field(unsigned int, blocks)
		__field(int, whichfork)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->btnum = cur->bc_btnum;
		__entry->agno = XFS_INO_TO_AGNO(cur->bc_mp,
					cur->bc_ino.ip->i_ino);
		__entry->agino = XFS_INO_TO_AGINO(cur->bc_mp,
					cur->bc_ino.ip->i_ino);
		__entry->levels = cur->bc_ino.ifake->if_levels;
		__entry->blocks = cur->bc_ino.ifake->if_blocks;
		__entry->whichfork = cur->bc_ino.whichfork;
	),
	TP_printk("dev %d:%d btree %s ag %u agino %u whichfork %s levels %u blocks %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
		  __entry->agno,
		  __entry->agino,
		  __entry->whichfork == XFS_ATTR_FORK ? "attr" : "data",
		  __entry->levels,
		  __entry->blocks)
)

TRACE_EVENT(xfs_btree_bload_level_geometry,
	TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
		 uint64_t nr_this_level, unsigned int nr_per_block,
		 unsigned int desired_npb, uint64_t blocks,
		 uint64_t blocks_with_extra),
	TP_ARGS(cur, level, nr_this_level, nr_per_block, desired_npb, blocks,
		blocks_with_extra),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_btnum_t, btnum)
		__field(unsigned int, level)
		__field(unsigned int, nlevels)
		__field(uint64_t, nr_this_level)
		__field(unsigned int, nr_per_block)
		__field(unsigned int, desired_npb)
		__field(unsigned long long, blocks)
		__field(unsigned long long, blocks_with_extra)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->btnum = cur->bc_btnum;
		__entry->level = level;
		__entry->nlevels = cur->bc_nlevels;
		__entry->nr_this_level = nr_this_level;
		__entry->nr_per_block = nr_per_block;
		__entry->desired_npb = desired_npb;
		__entry->blocks = blocks;
		__entry->blocks_with_extra = blocks_with_extra;
	),
	TP_printk("dev %d:%d btree %s level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
		  __entry->level,
		  __entry->nlevels,
		  __entry->nr_this_level,
		  __entry->nr_per_block,
		  __entry->desired_npb,
		  __entry->blocks,
		  __entry->blocks_with_extra)
)
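The geometry tracepoint records the sizing pass of the new bulk loader: each level must store a known number of records with at most so many per block, and the number of blocks at one level becomes the record count of the level above, until a single root block remains. A simplified userspace model of that calculation (plain ceiling division; the kernel's xfs_btree_bload also applies a slack/load factor, omitted here, and the per-block count below is an assumed value):

#include <inttypes.h>
#include <stdio.h>

/* Blocks needed to hold nr records at per_block records each. */
static uint64_t level_blocks(uint64_t nr, unsigned int per_block)
{
	return (nr + per_block - 1) / per_block;	/* ceil(nr / per_block) */
}

int main(void)
{
	uint64_t nr = 1000000;			/* leaf records to load */
	const unsigned int per_block = 251;	/* assumed records per block */
	unsigned int level = 0;
	uint64_t total = 0;

	/* Walk upward until a single root block holds everything. */
	for (;;) {
		uint64_t blocks = level_blocks(nr, per_block);

		total += blocks;
		printf("level %u: %" PRIu64 " records in %" PRIu64 " blocks\n",
		       level, nr, blocks);
		if (blocks == 1)
			break;
		nr = blocks;	/* one pointer per block feeds the next level */
		level++;
	}
	printf("total blocks: %" PRIu64 "\n", total);
	return 0;
}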

TRACE_EVENT(xfs_btree_bload_block,
	TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
		 uint64_t block_idx, uint64_t nr_blocks,
		 union xfs_btree_ptr *ptr, unsigned int nr_records),
	TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_btnum_t, btnum)
		__field(unsigned int, level)
		__field(unsigned long long, block_idx)
		__field(unsigned long long, nr_blocks)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(unsigned int, nr_records)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->btnum = cur->bc_btnum;
		__entry->level = level;
		__entry->block_idx = block_idx;
		__entry->nr_blocks = nr_blocks;
		if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
			xfs_fsblock_t fsb = be64_to_cpu(ptr->l);

			__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
			__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
		} else {
			__entry->agno = cur->bc_ag.agno;
			__entry->agbno = be32_to_cpu(ptr->s);
		}
		__entry->nr_records = nr_records;
	),
	TP_printk("dev %d:%d btree %s level %u block %llu/%llu fsb (%u/%u) recs %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
		  __entry->level,
		  __entry->block_idx,
		  __entry->nr_blocks,
		  __entry->agno,
		  __entry->agbno,
		  __entry->nr_records)
)

#endif /* _TRACE_XFS_H */

#undef TRACE_INCLUDE_PATH

@@ -306,6 +306,11 @@ xfs_trans_alloc(
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 *
 * Callers should obtain freeze protection to avoid two conflicts with fs
 * freezing: (1) having active transactions trip the m_active_trans ASSERTs;
 * and (2) grabbing buffers at the same time that freeze is trying to drain
 * the buffer LRU list.
 */
int
xfs_trans_alloc_empty(

@@ -450,7 +455,7 @@ xfs_trans_apply_sb_deltas(
	int whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp);
	sbp = XFS_BUF_TO_SBP(bp);
	sbp = bp->b_addr;

	/*
	 * Check that superblock mods match the mods made to AGF counters.