mirror of https://gitee.com/openkylin/linux.git
New code for 5.13:
- Various minor fixes in online scrub.
- Prevent metadata files from being automatically inactivated.
- Validate btree heights by the computed per-btree limits.
- Don't warn about remounting with deprecated mount options.
- Initialize attr forks at create time if we suspect we're going to need to store them.
- Reduce memory reallocation workouts in the logging code.
- Fix some theoretical math calculation errors in logged buffers that span multiple discontig memory ranges but contiguous ondisk regions.
- Speedups in dirty buffer bitmap handling.
- Make type verifier functions more inline-happy to reduce overhead.
- Reduce debug overhead in directory checking code.
- Many many typo fixes.
- Begin to handle the permanent loss of the very end of a filesystem.
- Fold struct xfs_icdinode into xfs_inode.
- Deprecate the long defunct BMV_IF_NO_DMAPI_READ from the bmapx ioctl.
- Remove a broken directory block format check from online scrub.
- Fix a bug where we could produce an unnecessarily tall data fork btree when creating an attr fork.
- Fix scrub and readonly remounts racing.
- Fix a writeback ioend log deadlock problem by dropping the behavior where we could preallocate a setfilesize transaction.
- Fix some bugs in the new extent count checking code.
- Fix some bugs in the attr fork preallocation code.
- Refactor if_flags out of the incore inode fork data structure.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEUzaAxoMeQq6m2jMV+H93GTRKtOsFAmB6MFUACgkQ+H93GTRK
tOvigBAAlpzBUXnZVo+U18u0tSHnq5c1zbXYcf5GPhQv9w3n3TlPi3YhK2vgEXlI
TULwsdU+an30oqWkQiVrwQjKPVaTWeWE3K0sA2MlYX9L2CwPPde4x5hwhyppfQxq
mQyu0suWp480ao7vToXAgZ751OdZRtGu8sRQ7rVQ/FVf9K4R8EqpZMEynNry25f+
hpK235hpf4IUC9E1A4pE2hNBSr/LGPIyu1t5sZsfazcNmtpKcauy5R5b8Pdnzo2/
WFa6PoeE8SRIp4OxZY/c/4QUI5cRubJGyoB+kbl0hg69uYIJO+pc+R69yrQPD9Z+
JDW/FktH+Zz4pstFsC+qnSvhRaF2DvXpvXrIldonQ2Z2ByVqbs3r6HzKySlWQ+QE
jU717HApWl/ADI/kVD2IuQnrbU+Q8Ue8thzgQeEpTRWsea2HzPMofNi5FImU2ulw
g4V7PleQWJ6AsLhcpfA46Y+CUAtjTD1Tvj67JpXuWJ+MFTB4hRm3U7zgCtV/0c3T
wBBUybQjDoVA6DDr6CP/9ki1k0BO3wKJGlZMR0bkEsuxXdFNTvHEz5lmueYT/Wxc
D91+oRbna9NpEeIVFGo6lhMIu2t0iYssFdgQKyn1jXrpGXKvOklP8zDjRdPnnQmz
plT2ajlXPIjc6KjOTP2mbVqKs059LuJoYV7gIWwM7CgtFsMIrd8=
=oRKe
-----END PGP SIGNATURE-----

Merge tag 'xfs-5.13-merge-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs updates from Darrick Wong:
 "The notable user-visible addition this cycle is the ability to remove
  space from the last AG in a filesystem. This is the first of many
  changes needed for full-fledged support for shrinking a filesystem.
  Still needed are (a) the ability to reorganize files and metadata away
  from the end of the fs; (b) the ability to remove entire allocation
  groups; (c) shrink support for realtime volumes; and (d) thorough
  testing of (a)-(c).

  There are a number of performance improvements in this code drop: Dave
  streamlined various parts of the buffer logging code and reduced the
  cost of various debugging checks, and added the ability to pre-create
  the xattr structures while creating files. Brian eliminated
  transaction reservations that were being held across writeback (thus
  reducing livelock potential).

  Other random pieces: Pavel fixed the repetitive warnings about
  deprecated mount options, I fixed online fsck to behave itself when a
  readonly remount comes in during scrub, and refactored various other
  parts of that code. Christoph contributed a lot of refactoring this
  cycle.
  The xfs_icdinode structure has been absorbed into the (incore)
  xfs_inode structure, and the format and flags handling around
  xfs_inode_fork structures has been simplified.

  Chandan provided a number of fixes for extent count overflow related
  problems that have been shaken out by debugging knobs added during
  5.12.

  Summary:

   - Various minor fixes in online scrub.
   - Prevent metadata files from being automatically inactivated.
   - Validate btree heights by the computed per-btree limits.
   - Don't warn about remounting with deprecated mount options.
   - Initialize attr forks at create time if we suspect we're going to
     need to store them.
   - Reduce memory reallocation workouts in the logging code.
   - Fix some theoretical math calculation errors in logged buffers that
     span multiple discontig memory ranges but contiguous ondisk regions.
   - Speedups in dirty buffer bitmap handling.
   - Make type verifier functions more inline-happy to reduce overhead.
   - Reduce debug overhead in directory checking code.
   - Many many typo fixes.
   - Begin to handle the permanent loss of the very end of a filesystem.
   - Fold struct xfs_icdinode into xfs_inode.
   - Deprecate the long defunct BMV_IF_NO_DMAPI_READ from the bmapx ioctl.
   - Remove a broken directory block format check from online scrub.
   - Fix a bug where we could produce an unnecessarily tall data fork
     btree when creating an attr fork.
   - Fix scrub and readonly remounts racing.
   - Fix a writeback ioend log deadlock problem by dropping the behavior
     where we could preallocate a setfilesize transaction.
   - Fix some bugs in the new extent count checking code.
   - Fix some bugs in the attr fork preallocation code.
   - Refactor if_flags out of the incore inode fork data structure"

* tag 'xfs-5.13-merge-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (77 commits)
  xfs: remove xfs_quiesce_attr declaration
  xfs: remove XFS_IFEXTENTS
  xfs: remove XFS_IFINLINE
  xfs: remove XFS_IFBROOT
  xfs: only look at the fork format in xfs_idestroy_fork
  xfs: simplify xfs_attr_remove_args
  xfs: rename and simplify xfs_bmap_one_block
  xfs: move the XFS_IFEXTENTS check into xfs_iread_extents
  xfs: drop unnecessary setfilesize helper
  xfs: drop unused ioend private merge and setfilesize code
  xfs: open code ioend needs workqueue helper
  xfs: drop submit side trans alloc for append ioends
  xfs: fix return of uninitialized value in variable error
  xfs: get rid of the ip parameter to xchk_setup_*
  xfs: fix scrub and remount-ro protection when running scrub
  xfs: move the check for post-EOF mappings into xfs_can_free_eofblocks
  xfs: move the xfs_can_free_eofblocks call under the IOLOCK
  xfs: precalculate default inode attribute offset
  xfs: default attr fork size does not handle device inodes
  xfs: inode fork allocation depends on XFS_IFEXTENT flag
  ...
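The shrink support described above is driven from userspace through the existing growfs interface: passing a data block count smaller than the current size to the growfs-data ioctl asks the kernel to trim free space off the tail of the last AG. The sketch below is a minimal, illustrative caller, not part of this series; it assumes the XFS_IOC_FSGEOMETRY and XFS_IOC_FSGROWFSDATA definitions from the xfs_fs.h uapi header (as exposed via xfsprogs' <xfs/xfs.h>), and the 4096-block reduction is an arbitrary example value.

/*
 * Hedged sketch: request an XFS data-volume shrink by handing a smaller
 * block count to XFS_IOC_FSGROWFSDATA. With this series the kernel only
 * honors the request when the removed blocks are free space at the end of
 * the last AG; otherwise it fails rather than relocating data.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* struct xfs_fsop_geom, struct xfs_growfs_data */

int main(int argc, char **argv)
{
	struct xfs_fsop_geom	geo;
	struct xfs_growfs_data	in;
	int			fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <xfs mountpoint>\n", argv[0]);
		return 2;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}

	/* Ask to drop 4096 fs blocks (example value) off the data device. */
	in.newblocks = geo.datablocks - 4096;
	in.imaxpct = geo.imaxpct;

	if (ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0) {
		/* e.g. fails if the tail of the last AG is not free */
		perror("XFS_IOC_FSGROWFSDATA");
		return 1;
	}

	printf("data volume resized to %llu blocks\n",
			(unsigned long long)in.newblocks);
	return 0;
}

If the blocks at the end of the last AG are in use, the request is refused rather than serviced by moving data, since reorganizing files and metadata away from the end of the filesystem is still listed as future work above.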
This commit is contained in:
commit d2b6f8a179
@@ -522,7 +522,7 @@ and the short name of the data device. They all can be found in:
  ================  ===========
  xfs_iwalk-$pid    Inode scans of the entire filesystem. Currently limited to
                    mount time quotacheck.
  xfs-blockgc       Background garbage collection of disk space that have been
  xfs-gc            Background garbage collection of disk space that have been
                    speculatively allocated beyond EOF or for staging copy on
                    write operations.
  ================  ===========
@@ -22,6 +22,11 @@
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"

static int
xfs_get_aghdr_buf(
@@ -485,6 +490,116 @@ xfs_ag_init_headers(
        return error;
}

int
xfs_ag_shrink_space(
        struct xfs_mount        *mp,
        struct xfs_trans        **tpp,
        xfs_agnumber_t          agno,
        xfs_extlen_t            delta)
{
        struct xfs_alloc_arg    args = {
                .tp     = *tpp,
                .mp     = mp,
                .type   = XFS_ALLOCTYPE_THIS_BNO,
                .minlen = delta,
                .maxlen = delta,
                .oinfo  = XFS_RMAP_OINFO_SKIP_UPDATE,
                .resv   = XFS_AG_RESV_NONE,
                .prod   = 1
        };
        struct xfs_buf          *agibp, *agfbp;
        struct xfs_agi          *agi;
        struct xfs_agf          *agf;
        int                     error, err2;

        ASSERT(agno == mp->m_sb.sb_agcount - 1);
        error = xfs_ialloc_read_agi(mp, *tpp, agno, &agibp);
        if (error)
                return error;

        agi = agibp->b_addr;

        error = xfs_alloc_read_agf(mp, *tpp, agno, 0, &agfbp);
        if (error)
                return error;

        agf = agfbp->b_addr;
        /* some extra paranoid checks before we shrink the ag */
        if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
                return -EFSCORRUPTED;
        if (delta >= agi->agi_length)
                return -EINVAL;

        args.fsbno = XFS_AGB_TO_FSB(mp, agno,
                        be32_to_cpu(agi->agi_length) - delta);

        /*
         * Disable perag reservations so it doesn't cause the allocation request
         * to fail. We'll reestablish reservation before we return.
         */
        error = xfs_ag_resv_free(agibp->b_pag);
        if (error)
                return error;

        /* internal log shouldn't also show up in the free space btrees */
        error = xfs_alloc_vextent(&args);
        if (!error && args.agbno == NULLAGBLOCK)
                error = -ENOSPC;

        if (error) {
                /*
                 * if extent allocation fails, need to roll the transaction to
                 * ensure that the AGFL fixup has been committed anyway.
                 */
                xfs_trans_bhold(*tpp, agfbp);
                err2 = xfs_trans_roll(tpp);
                if (err2)
                        return err2;
                xfs_trans_bjoin(*tpp, agfbp);
                goto resv_init_out;
        }

        /*
         * if successfully deleted from freespace btrees, need to confirm
         * per-AG reservation works as expected.
         */
        be32_add_cpu(&agi->agi_length, -delta);
        be32_add_cpu(&agf->agf_length, -delta);

        err2 = xfs_ag_resv_init(agibp->b_pag, *tpp);
        if (err2) {
                be32_add_cpu(&agi->agi_length, delta);
                be32_add_cpu(&agf->agf_length, delta);
                if (err2 != -ENOSPC)
                        goto resv_err;

                __xfs_bmap_add_free(*tpp, args.fsbno, delta, NULL, true);

                /*
                 * Roll the transaction before trying to re-init the per-ag
                 * reservation. The new transaction is clean so it will cancel
                 * without any side effects.
                 */
                error = xfs_defer_finish(tpp);
                if (error)
                        return error;

                error = -ENOSPC;
                goto resv_init_out;
        }
        xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
        xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
        return 0;
resv_init_out:
        err2 = xfs_ag_resv_init(agibp->b_pag, *tpp);
        if (!err2)
                return error;
resv_err:
        xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        return err2;
}

/*
 * Extent the AG indicated by the @id by the length passed in
 */
@@ -24,6 +24,8 @@ struct aghdr_init_data {
};

int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id);
int xfs_ag_shrink_space(struct xfs_mount *mp, struct xfs_trans **tpp,
                        xfs_agnumber_t agno, xfs_extlen_t delta);
int xfs_ag_extend_space(struct xfs_mount *mp, struct xfs_trans *tp,
                        struct aghdr_init_data *id, xfs_extlen_t len);
int xfs_ag_get_geometry(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -211,7 +211,11 @@ __xfs_ag_resv_init(
                ASSERT(0);
                return -EINVAL;
        }
        error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);

        if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
                error = -ENOSPC;
        else
                error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
        if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
@@ -2906,13 +2906,13 @@ xfs_agf_verify(

        if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > mp->m_ag_maxlevels ||
            be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > mp->m_ag_maxlevels)
                return __this_address;

        if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
            (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
             be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
             be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > mp->m_rmap_maxlevels))
                return __this_address;

        if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
@@ -2939,7 +2939,7 @@ xfs_agf_verify(

        if (xfs_sb_version_hasreflink(&mp->m_sb) &&
            (be32_to_cpu(agf->agf_refcount_level) < 1 ||
             be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
             be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels))
                return __this_address;

        return NULL;
@@ -70,6 +70,26 @@ xfs_inode_hasattr(
        return 1;
}

/*
 * Returns true if the there is exactly only block in the attr fork, in which
 * case the attribute fork consists of a single leaf block entry.
 */
bool
xfs_attr_is_leaf(
        struct xfs_inode        *ip)
{
        struct xfs_ifork        *ifp = ip->i_afp;
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    imap;

        if (ifp->if_nextents != 1 || ifp->if_format != XFS_DINODE_FMT_EXTENTS)
                return false;

        xfs_iext_first(ifp, &icur);
        xfs_iext_get_extent(ifp, &icur, &imap);
        return imap.br_startoff == 0 && imap.br_blockcount == 1;
}

/*========================================================================
 * Overall external interface routines.
 *========================================================================*/
@ -89,7 +109,7 @@ xfs_attr_get_ilocked(
|
|||
|
||||
if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
|
||||
return xfs_attr_shortform_getvalue(args);
|
||||
if (xfs_bmap_one_block(args->dp, XFS_ATTR_FORK))
|
||||
if (xfs_attr_is_leaf(args->dp))
|
||||
return xfs_attr_leaf_get(args);
|
||||
return xfs_attr_node_get(args);
|
||||
}
|
||||
|
@ -293,7 +313,7 @@ xfs_attr_set_args(
|
|||
return error;
|
||||
}
|
||||
|
||||
if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
|
||||
if (xfs_attr_is_leaf(dp)) {
|
||||
error = xfs_attr_leaf_addname(args);
|
||||
if (error != -ENOSPC)
|
||||
return error;
|
||||
|
@ -342,12 +362,10 @@ xfs_has_attr(
|
|||
if (!xfs_inode_hasattr(dp))
|
||||
return -ENOATTR;
|
||||
|
||||
if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
|
||||
if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
|
||||
return xfs_attr_sf_findname(args, NULL, NULL);
|
||||
}
|
||||
|
||||
if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
|
||||
if (xfs_attr_is_leaf(dp)) {
|
||||
error = xfs_attr_leaf_hasname(args, &bp);
|
||||
|
||||
if (bp)
|
||||
|
@ -366,21 +384,14 @@ int
|
|||
xfs_attr_remove_args(
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
struct xfs_inode *dp = args->dp;
|
||||
int error;
|
||||
if (!xfs_inode_hasattr(args->dp))
|
||||
return -ENOATTR;
|
||||
|
||||
if (!xfs_inode_hasattr(dp)) {
|
||||
error = -ENOATTR;
|
||||
} else if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
|
||||
error = xfs_attr_shortform_remove(args);
|
||||
} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
|
||||
error = xfs_attr_leaf_removename(args);
|
||||
} else {
|
||||
error = xfs_attr_node_removename(args);
|
||||
}
|
||||
|
||||
return error;
|
||||
if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
|
||||
return xfs_attr_shortform_remove(args);
|
||||
if (xfs_attr_is_leaf(args->dp))
|
||||
return xfs_attr_leaf_removename(args);
|
||||
return xfs_attr_node_removename(args);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -928,6 +939,7 @@ xfs_attr_node_addname(
|
|||
* Search to see if name already exists, and get back a pointer
|
||||
* to where it should go.
|
||||
*/
|
||||
error = 0;
|
||||
retval = xfs_attr_node_hasname(args, &state);
|
||||
if (retval != -ENOATTR && retval != -EEXIST)
|
||||
goto out;
|
||||
|
@ -1282,7 +1294,7 @@ xfs_attr_node_removename(
|
|||
/*
|
||||
* If the result is small enough, push it all into the inode.
|
||||
*/
|
||||
if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
|
||||
if (xfs_attr_is_leaf(dp))
|
||||
error = xfs_attr_node_shrink(args, state);
|
||||
|
||||
out:
|
||||
|
|
|
@ -85,6 +85,7 @@ int xfs_attr_inactive(struct xfs_inode *dp);
|
|||
int xfs_attr_list_ilocked(struct xfs_attr_list_context *);
|
||||
int xfs_attr_list(struct xfs_attr_list_context *);
|
||||
int xfs_inode_hasattr(struct xfs_inode *ip);
|
||||
bool xfs_attr_is_leaf(struct xfs_inode *ip);
|
||||
int xfs_attr_get_ilocked(struct xfs_da_args *args);
|
||||
int xfs_attr_get(struct xfs_da_args *args);
|
||||
int xfs_attr_set(struct xfs_da_args *args);
|
||||
|
|
|
@ -518,10 +518,10 @@ xfs_attr_copy_value(
|
|||
* Query whether the total requested number of attr fork bytes of extended
|
||||
* attribute space will be able to fit inline.
|
||||
*
|
||||
* Returns zero if not, else the di_forkoff fork offset to be used in the
|
||||
* Returns zero if not, else the i_forkoff fork offset to be used in the
|
||||
* literal area for attribute data once the new bytes have been added.
|
||||
*
|
||||
* di_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
|
||||
* i_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
|
||||
* special case for dev/uuid inodes, they have fixed size data forks.
|
||||
*/
|
||||
int
|
||||
|
@ -560,7 +560,7 @@ xfs_attr_shortform_bytesfit(
|
|||
* literal area rebalancing.
|
||||
*/
|
||||
if (bytes <= XFS_IFORK_ASIZE(dp))
|
||||
return dp->i_d.di_forkoff;
|
||||
return dp->i_forkoff;
|
||||
|
||||
/*
|
||||
* For attr2 we can try to move the forkoff if there is space in the
|
||||
|
@ -581,7 +581,7 @@ xfs_attr_shortform_bytesfit(
|
|||
* minimum offset only needs to be the space required for
|
||||
* the btree root.
|
||||
*/
|
||||
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
|
||||
if (!dp->i_forkoff && dp->i_df.if_bytes >
|
||||
xfs_default_attroffset(dp))
|
||||
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
|
||||
break;
|
||||
|
@ -592,10 +592,10 @@ xfs_attr_shortform_bytesfit(
|
|||
* minforkoff to where the btree root can finish so we have
|
||||
* plenty of room for attrs
|
||||
*/
|
||||
if (dp->i_d.di_forkoff) {
|
||||
if (offset < dp->i_d.di_forkoff)
|
||||
if (dp->i_forkoff) {
|
||||
if (offset < dp->i_forkoff)
|
||||
return 0;
|
||||
return dp->i_d.di_forkoff;
|
||||
return dp->i_forkoff;
|
||||
}
|
||||
dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
|
||||
break;
|
||||
|
@ -651,13 +651,8 @@ xfs_attr_shortform_create(
|
|||
trace_xfs_attr_sf_create(args);
|
||||
|
||||
ASSERT(ifp->if_bytes == 0);
|
||||
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS) {
|
||||
ifp->if_flags &= ~XFS_IFEXTENTS; /* just in case */
|
||||
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS)
|
||||
ifp->if_format = XFS_DINODE_FMT_LOCAL;
|
||||
ifp->if_flags |= XFS_IFINLINE;
|
||||
} else {
|
||||
ASSERT(ifp->if_flags & XFS_IFINLINE);
|
||||
}
|
||||
xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
|
||||
hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data;
|
||||
memset(hdr, 0, sizeof(*hdr));
|
||||
|
@ -730,10 +725,10 @@ xfs_attr_shortform_add(
|
|||
|
||||
dp = args->dp;
|
||||
mp = dp->i_mount;
|
||||
dp->i_d.di_forkoff = forkoff;
|
||||
dp->i_forkoff = forkoff;
|
||||
|
||||
ifp = dp->i_afp;
|
||||
ASSERT(ifp->if_flags & XFS_IFINLINE);
|
||||
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
|
||||
sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
|
||||
if (xfs_attr_sf_findname(args, &sfe, NULL) == -EEXIST)
|
||||
ASSERT(0);
|
||||
|
@ -770,7 +765,7 @@ xfs_attr_fork_remove(
|
|||
xfs_idestroy_fork(ip->i_afp);
|
||||
kmem_cache_free(xfs_ifork_zone, ip->i_afp);
|
||||
ip->i_afp = NULL;
|
||||
ip->i_d.di_forkoff = 0;
|
||||
ip->i_forkoff = 0;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
|
||||
|
@ -821,8 +816,8 @@ xfs_attr_shortform_remove(
|
|||
xfs_attr_fork_remove(dp, args->trans);
|
||||
} else {
|
||||
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
|
||||
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
|
||||
ASSERT(dp->i_d.di_forkoff);
|
||||
dp->i_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
|
||||
ASSERT(dp->i_forkoff);
|
||||
ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
|
||||
(args->op_flags & XFS_DA_OP_ADDNAME) ||
|
||||
!(mp->m_flags & XFS_MOUNT_ATTR2) ||
|
||||
|
@ -851,7 +846,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
|
|||
trace_xfs_attr_sf_lookup(args);
|
||||
|
||||
ifp = args->dp->i_afp;
|
||||
ASSERT(ifp->if_flags & XFS_IFINLINE);
|
||||
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
|
||||
sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
|
||||
sfe = &sf->list[0];
|
||||
for (i = 0; i < sf->hdr.count;
|
||||
|
@ -878,7 +873,7 @@ xfs_attr_shortform_getvalue(
|
|||
struct xfs_attr_sf_entry *sfe;
|
||||
int i;
|
||||
|
||||
ASSERT(args->dp->i_afp->if_flags == XFS_IFINLINE);
|
||||
ASSERT(args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
|
||||
sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
|
||||
sfe = &sf->list[0];
|
||||
for (i = 0; i < sf->hdr.count;
|
||||
|
|
|
@ -66,13 +66,13 @@ xfs_bmap_compute_maxlevels(
|
|||
* either a signed 32-bit number for the data fork, or a signed 16-bit
|
||||
* number for the attr fork.
|
||||
*
|
||||
* Note that we can no longer assume that if we are in ATTR1 that
|
||||
* the fork offset of all the inodes will be
|
||||
* (xfs_default_attroffset(ip) >> 3) because we could have mounted
|
||||
* with ATTR2 and then mounted back with ATTR1, keeping the
|
||||
* di_forkoff's fixed but probably at various positions. Therefore,
|
||||
* for both ATTR1 and ATTR2 we have to assume the worst case scenario
|
||||
* of a minimum size available.
|
||||
* Note that we can no longer assume that if we are in ATTR1 that the
|
||||
* fork offset of all the inodes will be
|
||||
* (xfs_default_attroffset(ip) >> 3) because we could have mounted with
|
||||
* ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
|
||||
* but probably at various positions. Therefore, for both ATTR1 and
|
||||
* ATTR2 we have to assume the worst case scenario of a minimum size
|
||||
* available.
|
||||
*/
|
||||
if (whichfork == XFS_DATA_FORK) {
|
||||
maxleafents = MAXEXTNUM;
|
||||
|
@ -94,6 +94,15 @@ xfs_bmap_compute_maxlevels(
|
|||
mp->m_bm_maxlevels[whichfork] = level;
|
||||
}
|
||||
|
||||
unsigned int
|
||||
xfs_bmap_compute_attr_offset(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
if (mp->m_sb.sb_inodesize == 256)
|
||||
return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
|
||||
return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
|
||||
}
|
||||
|
||||
STATIC int /* error */
|
||||
xfs_bmbt_lookup_eq(
|
||||
struct xfs_btree_cur *cur,
|
||||
|
@ -192,22 +201,15 @@ uint
|
|||
xfs_default_attroffset(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
uint offset;
|
||||
|
||||
if (mp->m_sb.sb_inodesize == 256)
|
||||
offset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
|
||||
else
|
||||
offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
|
||||
|
||||
ASSERT(offset < XFS_LITINO(mp));
|
||||
return offset;
|
||||
if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
|
||||
return roundup(sizeof(xfs_dev_t), 8);
|
||||
return M_IGEO(ip->i_mount)->attr_fork_offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper routine to reset inode di_forkoff field when switching
|
||||
* attribute fork from local to extent format - we reset it where
|
||||
* possible to make space available for inline data fork extents.
|
||||
* Helper routine to reset inode i_forkoff field when switching attribute fork
|
||||
* from local to extent format - we reset it where possible to make space
|
||||
* available for inline data fork extents.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_bmap_forkoff_reset(
|
||||
|
@ -219,8 +221,8 @@ xfs_bmap_forkoff_reset(
|
|||
ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
|
||||
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
|
||||
|
||||
if (dfl_forkoff > ip->i_d.di_forkoff)
|
||||
ip->i_d.di_forkoff = dfl_forkoff;
|
||||
if (dfl_forkoff > ip->i_forkoff)
|
||||
ip->i_forkoff = dfl_forkoff;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -603,7 +605,7 @@ xfs_bmap_btree_to_extents(
|
|||
|
||||
ASSERT(cur);
|
||||
ASSERT(whichfork != XFS_COW_FORK);
|
||||
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
|
||||
ASSERT(!xfs_need_iread_extents(ifp));
|
||||
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
|
||||
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
|
||||
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
|
||||
|
@ -624,14 +626,13 @@ xfs_bmap_btree_to_extents(
|
|||
return error;
|
||||
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
|
||||
xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
|
||||
ip->i_d.di_nblocks--;
|
||||
ip->i_nblocks--;
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
xfs_trans_binval(tp, cbp);
|
||||
if (cur->bc_bufs[0] == cbp)
|
||||
cur->bc_bufs[0] = NULL;
|
||||
xfs_iroot_realloc(ip, -1, whichfork);
|
||||
ASSERT(ifp->if_broot == NULL);
|
||||
ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
|
||||
ifp->if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
|
||||
return 0;
|
||||
|
@ -675,7 +676,6 @@ xfs_bmap_extents_to_btree(
|
|||
* to expand the root.
|
||||
*/
|
||||
xfs_iroot_realloc(ip, 1, whichfork);
|
||||
ifp->if_flags |= XFS_IFBROOT;
|
||||
|
||||
/*
|
||||
* Fill in the root.
|
||||
|
@ -726,7 +726,7 @@ xfs_bmap_extents_to_btree(
|
|||
args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
|
||||
tp->t_firstblock = args.fsbno;
|
||||
cur->bc_ino.allocated++;
|
||||
ip->i_d.di_nblocks++;
|
||||
ip->i_nblocks++;
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
|
||||
XFS_FSB_TO_DADDR(mp, args.fsbno),
|
||||
|
@ -805,8 +805,6 @@ xfs_bmap_local_to_extents_empty(
|
|||
ASSERT(ifp->if_nextents == 0);
|
||||
|
||||
xfs_bmap_forkoff_reset(ip, whichfork);
|
||||
ifp->if_flags &= ~XFS_IFINLINE;
|
||||
ifp->if_flags |= XFS_IFEXTENTS;
|
||||
ifp->if_u1.if_root = NULL;
|
||||
ifp->if_height = 0;
|
||||
ifp->if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
|
@ -850,7 +848,6 @@ xfs_bmap_local_to_extents(
|
|||
|
||||
flags = 0;
|
||||
error = 0;
|
||||
ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.tp = tp;
|
||||
args.mp = ip->i_mount;
|
||||
|
@ -908,7 +905,7 @@ xfs_bmap_local_to_extents(
|
|||
xfs_iext_insert(ip, &icur, &rec, 0);
|
||||
|
||||
ifp->if_nextents = 1;
|
||||
ip->i_d.di_nblocks = 1;
|
||||
ip->i_nblocks = 1;
|
||||
xfs_trans_mod_dquot_byino(tp, ip,
|
||||
XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
flags |= xfs_ilog_fext(whichfork);
|
||||
|
@ -927,13 +924,15 @@ xfs_bmap_add_attrfork_btree(
|
|||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
int *flags) /* inode logging flags */
|
||||
{
|
||||
struct xfs_btree_block *block = ip->i_df.if_broot;
|
||||
xfs_btree_cur_t *cur; /* btree cursor */
|
||||
int error; /* error return value */
|
||||
xfs_mount_t *mp; /* file system mount struct */
|
||||
int stat; /* newroot status */
|
||||
|
||||
mp = ip->i_mount;
|
||||
if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
|
||||
|
||||
if (XFS_BMAP_BMDR_SPACE(block) <= XFS_IFORK_DSIZE(ip))
|
||||
*flags |= XFS_ILOG_DBROOT;
|
||||
else {
|
||||
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
|
||||
|
@ -1027,23 +1026,27 @@ xfs_bmap_add_attrfork_local(
|
|||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
/* Set an inode attr fork off based on the format */
|
||||
/*
|
||||
* Set an inode attr fork offset based on the format of the data fork.
|
||||
*/
|
||||
int
|
||||
xfs_bmap_set_attrforkoff(
|
||||
struct xfs_inode *ip,
|
||||
int size,
|
||||
int *version)
|
||||
{
|
||||
int default_size = xfs_default_attroffset(ip) >> 3;
|
||||
|
||||
switch (ip->i_df.if_format) {
|
||||
case XFS_DINODE_FMT_DEV:
|
||||
ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
|
||||
ip->i_forkoff = default_size;
|
||||
break;
|
||||
case XFS_DINODE_FMT_LOCAL:
|
||||
case XFS_DINODE_FMT_EXTENTS:
|
||||
case XFS_DINODE_FMT_BTREE:
|
||||
ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
|
||||
if (!ip->i_d.di_forkoff)
|
||||
ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
|
||||
ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
|
||||
if (!ip->i_forkoff)
|
||||
ip->i_forkoff = default_size;
|
||||
else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
|
||||
*version = 2;
|
||||
break;
|
||||
|
@ -1092,11 +1095,7 @@ xfs_bmap_add_attrfork(
|
|||
goto trans_cancel;
|
||||
ASSERT(ip->i_afp == NULL);
|
||||
|
||||
ip->i_afp = kmem_cache_zalloc(xfs_ifork_zone,
|
||||
GFP_KERNEL | __GFP_NOFAIL);
|
||||
|
||||
ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
ip->i_afp->if_flags = XFS_IFEXTENTS;
|
||||
ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
|
||||
logflags = 0;
|
||||
switch (ip->i_df.if_format) {
|
||||
case XFS_DINODE_FMT_LOCAL:
|
||||
|
@ -1222,12 +1221,10 @@ xfs_iread_extents(
|
|||
struct xfs_btree_cur *cur;
|
||||
int error;
|
||||
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
if (!xfs_need_iread_extents(ifp))
|
||||
return 0;
|
||||
|
||||
if (XFS_IS_CORRUPT(mp, ifp->if_format != XFS_DINODE_FMT_BTREE)) {
|
||||
error = -EFSCORRUPTED;
|
||||
goto out;
|
||||
}
|
||||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
|
||||
ir.loaded = 0;
|
||||
xfs_iext_first(ifp, &ir.icur);
|
||||
|
@ -1243,8 +1240,6 @@ xfs_iread_extents(
|
|||
goto out;
|
||||
}
|
||||
ASSERT(ir.loaded == xfs_iext_count(ifp));
|
||||
|
||||
ifp->if_flags |= XFS_IFEXTENTS;
|
||||
return 0;
|
||||
out:
|
||||
xfs_iext_destroy(ifp);
|
||||
|
@ -1279,11 +1274,9 @@ xfs_bmap_first_unused(
|
|||
|
||||
ASSERT(xfs_ifork_has_extents(ifp));
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
lowest = max = *first_unused;
|
||||
for_each_xfs_iext(ifp, &icur, &got) {
|
||||
|
@ -1331,11 +1324,9 @@ xfs_bmap_last_before(
|
|||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
|
||||
*last_block = 0;
|
||||
|
@ -1354,11 +1345,9 @@ xfs_bmap_last_extent(
|
|||
struct xfs_iext_cursor icur;
|
||||
int error;
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_iext_last(ifp, &icur);
|
||||
if (!xfs_iext_get_extent(ifp, &icur, rec))
|
||||
|
@ -1439,38 +1428,6 @@ xfs_bmap_last_offset(
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns whether the selected fork of the inode has exactly one
|
||||
* block or not. For the data fork we check this matches di_size,
|
||||
* implying the file's range is 0..bsize-1.
|
||||
*/
|
||||
int /* 1=>1 block, 0=>otherwise */
|
||||
xfs_bmap_one_block(
|
||||
struct xfs_inode *ip, /* incore inode */
|
||||
int whichfork) /* data or attr fork */
|
||||
{
|
||||
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
|
||||
int rval; /* return value */
|
||||
struct xfs_bmbt_irec s; /* internal version of extent */
|
||||
struct xfs_iext_cursor icur;
|
||||
|
||||
#ifndef DEBUG
|
||||
if (whichfork == XFS_DATA_FORK)
|
||||
return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
|
||||
#endif /* !DEBUG */
|
||||
if (ifp->if_nextents != 1)
|
||||
return 0;
|
||||
if (ifp->if_format != XFS_DINODE_FMT_EXTENTS)
|
||||
return 0;
|
||||
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
|
||||
xfs_iext_first(ifp, &icur);
|
||||
xfs_iext_get_extent(ifp, &icur, &s);
|
||||
rval = s.br_startoff == 0 && s.br_blockcount == 1;
|
||||
if (rval && whichfork == XFS_DATA_FORK)
|
||||
ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
|
||||
return rval;
|
||||
}
|
||||
|
||||
/*
|
||||
* Extent tree manipulation functions used during allocation.
|
||||
*/
|
||||
|
@ -2938,7 +2895,7 @@ xfs_bmap_add_extent_hole_real(
|
|||
*/
|
||||
|
||||
/*
|
||||
* Adjust the size of the new extent based on di_extsize and rt extsize.
|
||||
* Adjust the size of the new extent based on i_extsize and rt extsize.
|
||||
*/
|
||||
int
|
||||
xfs_bmap_extsize_align(
|
||||
|
@ -3444,7 +3401,7 @@ xfs_bmap_btalloc_accounting(
|
|||
}
|
||||
|
||||
/* data/attr fork only */
|
||||
ap->ip->i_d.di_nblocks += args->len;
|
||||
ap->ip->i_nblocks += args->len;
|
||||
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
|
||||
if (ap->wasdel) {
|
||||
ap->ip->i_delayed_blks -= args->len;
|
||||
|
@ -3586,7 +3543,8 @@ xfs_bmap_exact_minlen_extent_alloc(
|
|||
args.fsbno = ap->blkno;
|
||||
args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
|
||||
args.type = XFS_ALLOCTYPE_FIRST_AG;
|
||||
args.total = args.minlen = args.maxlen = ap->minlen;
|
||||
args.minlen = args.maxlen = ap->minlen;
|
||||
args.total = ap->total;
|
||||
|
||||
args.alignment = 1;
|
||||
args.minalignslop = 0;
|
||||
|
@ -3985,11 +3943,9 @@ xfs_bmapi_read(
|
|||
|
||||
XFS_STATS_INC(mp, xs_blk_mapr);
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(NULL, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(NULL, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
|
||||
eof = true;
|
||||
|
@ -4227,7 +4183,7 @@ xfs_bmapi_allocate(
|
|||
return error;
|
||||
}
|
||||
|
||||
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
|
||||
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
|
||||
/*
|
||||
* Bump the number of extents we've allocated
|
||||
|
@ -4300,7 +4256,7 @@ xfs_bmapi_convert_unwritten(
|
|||
* Modify (by adding) the state flag, if writing.
|
||||
*/
|
||||
ASSERT(mval->br_blockcount <= len);
|
||||
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
|
||||
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
|
||||
bma->ip, whichfork);
|
||||
}
|
||||
|
@ -4469,11 +4425,9 @@ xfs_bmapi_write(
|
|||
|
||||
XFS_STATS_INC(mp, xs_blk_mapw);
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
goto error0;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
goto error0;
|
||||
|
||||
if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
|
||||
eof = true;
|
||||
|
@ -4752,11 +4706,9 @@ xfs_bmapi_remap(
|
|||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -EIO;
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
|
||||
/* make sure we only reflink into a hole. */
|
||||
|
@ -4764,10 +4716,10 @@ xfs_bmapi_remap(
|
|||
ASSERT(got.br_startoff - bno >= len);
|
||||
}
|
||||
|
||||
ip->i_d.di_nblocks += len;
|
||||
ip->i_nblocks += len;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
if (ifp->if_flags & XFS_IFBROOT) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
|
||||
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
|
||||
cur->bc_ino.flags = 0;
|
||||
}
|
||||
|
@ -5355,7 +5307,7 @@ xfs_bmap_del_extent_real(
|
|||
* Adjust inode # blocks in the file.
|
||||
*/
|
||||
if (nblks)
|
||||
ip->i_d.di_nblocks -= nblks;
|
||||
ip->i_nblocks -= nblks;
|
||||
/*
|
||||
* Adjust quota data.
|
||||
*/
|
||||
|
@ -5427,9 +5379,10 @@ __xfs_bunmapi(
|
|||
else
|
||||
max_len = len;
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
|
||||
(error = xfs_iread_extents(tp, ip, whichfork)))
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (xfs_iext_count(ifp) == 0) {
|
||||
*rlen = 0;
|
||||
return 0;
|
||||
|
@ -5445,7 +5398,7 @@ __xfs_bunmapi(
|
|||
end--;
|
||||
|
||||
logflags = 0;
|
||||
if (ifp->if_flags & XFS_IFBROOT) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
|
||||
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
|
||||
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
|
||||
cur->bc_ino.flags = 0;
|
||||
|
@ -5915,13 +5868,11 @@ xfs_bmap_collapse_extents(
|
|||
|
||||
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (ifp->if_flags & XFS_IFBROOT) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
|
||||
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
|
||||
cur->bc_ino.flags = 0;
|
||||
}
|
||||
|
@ -6032,13 +5983,11 @@ xfs_bmap_insert_extents(
|
|||
|
||||
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (ifp->if_flags & XFS_IFBROOT) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
|
||||
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
|
||||
cur->bc_ino.flags = 0;
|
||||
}
|
||||
|
@ -6135,12 +6084,10 @@ xfs_bmap_split_extent(
|
|||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -EIO;
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
/* Read in all the extents */
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
/* Read in all the extents */
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* If there are not extents, or split_fsb lies in a hole we are done.
|
||||
|
@ -6155,7 +6102,7 @@ xfs_bmap_split_extent(
|
|||
new.br_blockcount = got.br_blockcount - gotblkcnt;
|
||||
new.br_state = got.br_state;
|
||||
|
||||
if (ifp->if_flags & XFS_IFBROOT) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
|
||||
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
|
||||
cur->bc_ino.flags = 0;
|
||||
error = xfs_bmbt_lookup_eq(cur, &got, &i);
|
||||
|
|
|
@ -185,6 +185,7 @@ static inline bool xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec)
|
|||
|
||||
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
|
||||
xfs_filblks_t len);
|
||||
unsigned int xfs_bmap_compute_attr_offset(struct xfs_mount *mp);
|
||||
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
|
||||
int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
|
||||
void xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
|
||||
|
@ -199,7 +200,6 @@ int xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
|
|||
xfs_fileoff_t *last_block, int whichfork);
|
||||
int xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
|
||||
int whichfork);
|
||||
int xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
|
||||
int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
|
||||
xfs_filblks_t len, struct xfs_bmbt_irec *mval,
|
||||
int *nmap, int flags);
|
||||
|
|
|
@ -183,7 +183,7 @@ xfs_bmbt_update_cursor(
|
|||
struct xfs_btree_cur *dst)
|
||||
{
|
||||
ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
|
||||
(dst->bc_ino.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
|
||||
(dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));
|
||||
|
||||
dst->bc_ino.allocated += src->bc_ino.allocated;
|
||||
dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;
|
||||
|
@ -260,7 +260,7 @@ xfs_bmbt_alloc_block(
|
|||
ASSERT(args.len == 1);
|
||||
cur->bc_tp->t_firstblock = args.fsbno;
|
||||
cur->bc_ino.allocated++;
|
||||
cur->bc_ino.ip->i_d.di_nblocks++;
|
||||
cur->bc_ino.ip->i_nblocks++;
|
||||
xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
|
||||
xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
|
||||
XFS_TRANS_DQ_BCOUNT, 1L);
|
||||
|
@ -287,7 +287,7 @@ xfs_bmbt_free_block(
|
|||
|
||||
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
|
||||
xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
|
||||
ip->i_d.di_nblocks--;
|
||||
ip->i_nblocks--;
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
|
||||
|
|
|
@ -387,7 +387,6 @@ xfs_btree_bload_prep_block(
|
|||
new_size = bbl->iroot_size(cur, nr_this_block, priv);
|
||||
ifp->if_broot = kmem_zalloc(new_size, 0);
|
||||
ifp->if_broot_bytes = (int)new_size;
|
||||
ifp->if_flags |= XFS_IFBROOT;
|
||||
|
||||
/* Initialize it and send it out. */
|
||||
xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
|
||||
|
|
|
@ -2145,7 +2145,7 @@ xfs_da_grow_inode_int(
|
|||
struct xfs_trans *tp = args->trans;
|
||||
struct xfs_inode *dp = args->dp;
|
||||
int w = args->whichfork;
|
||||
xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
|
||||
xfs_rfsblock_t nblks = dp->i_nblocks;
|
||||
struct xfs_bmbt_irec map, *mapp;
|
||||
int nmap, error, got, i, mapi;
|
||||
|
||||
|
@ -2211,7 +2211,7 @@ xfs_da_grow_inode_int(
|
|||
}
|
||||
|
||||
/* account for newly allocated blocks in reserved blocks total */
|
||||
args->total -= dp->i_d.di_nblocks - nblks;
|
||||
args->total -= dp->i_nblocks - nblks;
|
||||
|
||||
out_free_map:
|
||||
if (mapp != &map)
|
||||
|
|
|
@ -179,9 +179,9 @@ xfs_dir_isempty(
|
|||
xfs_dir2_sf_hdr_t *sfp;
|
||||
|
||||
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
|
||||
if (dp->i_d.di_size == 0) /* might happen during shutdown. */
|
||||
if (dp->i_disk_size == 0) /* might happen during shutdown. */
|
||||
return 1;
|
||||
if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
|
||||
if (dp->i_disk_size > XFS_IFORK_DSIZE(dp))
|
||||
return 0;
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
return !sfp->count;
|
||||
|
@ -584,8 +584,8 @@ xfs_dir2_grow_inode(
|
|||
xfs_fsize_t size; /* directory file (data) size */
|
||||
|
||||
size = XFS_FSB_TO_B(mp, bno + count);
|
||||
if (size > dp->i_d.di_size) {
|
||||
dp->i_d.di_size = size;
|
||||
if (size > dp->i_disk_size) {
|
||||
dp->i_disk_size = size;
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
|
||||
}
|
||||
}
|
||||
|
@ -608,7 +608,7 @@ xfs_dir2_isblock(
|
|||
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
|
||||
if (XFS_IS_CORRUPT(args->dp->i_mount,
|
||||
rval != 0 &&
|
||||
args->dp->i_d.di_size != args->geo->blksize))
|
||||
args->dp->i_disk_size != args->geo->blksize))
|
||||
return -EFSCORRUPTED;
|
||||
*vp = rval;
|
||||
return 0;
|
||||
|
@ -687,7 +687,7 @@ xfs_dir2_shrink_inode(
|
|||
/*
|
||||
* If the block isn't the last one in the directory, we're done.
|
||||
*/
|
||||
if (dp->i_d.di_size > xfs_dir2_db_off_to_byte(args->geo, db + 1, 0))
|
||||
if (dp->i_disk_size > xfs_dir2_db_off_to_byte(args->geo, db + 1, 0))
|
||||
return 0;
|
||||
bno = da;
|
||||
if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
|
||||
|
@ -703,7 +703,7 @@ xfs_dir2_shrink_inode(
|
|||
/*
|
||||
* Set the size to the new last block.
|
||||
*/
|
||||
dp->i_d.di_size = XFS_FSB_TO_B(mp, bno);
|
||||
dp->i_disk_size = XFS_FSB_TO_B(mp, bno);
|
||||
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -961,7 +961,7 @@ xfs_dir2_leaf_to_block(
|
|||
* been left behind during no-space-reservation operations.
|
||||
* These will show up in the leaf bests table.
|
||||
*/
|
||||
while (dp->i_d.di_size > args->geo->blksize) {
|
||||
while (dp->i_disk_size > args->geo->blksize) {
|
||||
int hdrsz;
|
||||
|
||||
hdrsz = args->geo->data_entry_offset;
|
||||
|
@ -1096,14 +1096,14 @@ xfs_dir2_sf_to_block(
|
|||
|
||||
trace_xfs_dir2_sf_to_block(args);
|
||||
|
||||
ASSERT(ifp->if_flags & XFS_IFINLINE);
|
||||
ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(dp->i_disk_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;
|
||||
|
||||
ASSERT(ifp->if_bytes == dp->i_d.di_size);
|
||||
ASSERT(ifp->if_bytes == dp->i_disk_size);
|
||||
ASSERT(ifp->if_u1.if_data != NULL);
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
|
||||
ASSERT(dp->i_disk_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
|
||||
ASSERT(dp->i_df.if_nextents == 0);
|
||||
|
||||
/*
|
||||
|
@ -1115,7 +1115,7 @@ xfs_dir2_sf_to_block(
|
|||
|
||||
xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
|
||||
xfs_bmap_local_to_extents_empty(tp, dp, XFS_DATA_FORK);
|
||||
dp->i_d.di_size = 0;
|
||||
dp->i_disk_size = 0;
|
||||
|
||||
/*
|
||||
* Add block 0 to the inode.
|
||||
|
|
|
@ -218,7 +218,7 @@ __xfs_dir3_data_check(
|
|||
*/
|
||||
if (dep->namelen == 0)
|
||||
return __this_address;
|
||||
if (xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)))
|
||||
if (!xfs_verify_dir_ino(mp, be64_to_cpu(dep->inumber)))
|
||||
return __this_address;
|
||||
if (offset + xfs_dir2_data_entsize(mp, dep->namelen) > end)
|
||||
return __this_address;
|
||||
|
|
|
@ -113,7 +113,7 @@ xfs_dir3_leaf1_check(
|
|||
} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
|
||||
return __this_address;
|
||||
|
||||
return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf);
|
||||
return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf, false);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -139,7 +139,8 @@ xfs_failaddr_t
|
|||
xfs_dir3_leaf_check_int(
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_dir3_icleaf_hdr *hdr,
|
||||
struct xfs_dir2_leaf *leaf)
|
||||
struct xfs_dir2_leaf *leaf,
|
||||
bool expensive_checking)
|
||||
{
|
||||
struct xfs_da_geometry *geo = mp->m_dir_geo;
|
||||
xfs_dir2_leaf_tail_t *ltp;
|
||||
|
@ -151,7 +152,7 @@ xfs_dir3_leaf_check_int(
|
|||
/*
|
||||
* XXX (dgc): This value is not restrictive enough.
|
||||
* Should factor in the size of the bests table as well.
|
||||
* We can deduce a value for that from di_size.
|
||||
* We can deduce a value for that from i_disk_size.
|
||||
*/
|
||||
if (hdr->count > geo->leaf_max_ents)
|
||||
return __this_address;
|
||||
|
@ -162,6 +163,9 @@ xfs_dir3_leaf_check_int(
|
|||
(char *)&hdr->ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
|
||||
return __this_address;
|
||||
|
||||
if (!expensive_checking)
|
||||
return NULL;
|
||||
|
||||
/* Check hash value order, count stale entries. */
|
||||
for (i = stale = 0; i < hdr->count; i++) {
|
||||
if (i + 1 < hdr->count) {
|
||||
|
@ -195,7 +199,7 @@ xfs_dir3_leaf_verify(
|
|||
return fa;
|
||||
|
||||
xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, bp->b_addr);
|
||||
return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr);
|
||||
return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr, true);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -73,7 +73,7 @@ xfs_dir3_leafn_check(
|
|||
} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
|
||||
return __this_address;
|
||||
|
||||
return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf);
|
||||
return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf, false);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
@ -441,7 +441,7 @@ xfs_dir2_leaf_to_node(
|
|||
leaf = lbp->b_addr;
|
||||
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
|
||||
if (be32_to_cpu(ltp->bestcount) >
|
||||
(uint)dp->i_d.di_size / args->geo->blksize) {
|
||||
(uint)dp->i_disk_size / args->geo->blksize) {
|
||||
xfs_buf_mark_corrupt(lbp);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
|
|
@ -127,7 +127,8 @@ xfs_dir3_leaf_find_entry(struct xfs_dir3_icleaf_hdr *leafhdr,
|
|||
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
|
||||
|
||||
extern xfs_failaddr_t xfs_dir3_leaf_check_int(struct xfs_mount *mp,
|
||||
struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
|
||||
struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf,
|
||||
bool expensive_checks);
|
||||
|
||||
/* xfs_dir2_node.c */
|
||||
void xfs_dir2_free_hdr_from_disk(struct xfs_mount *mp,
|
||||
|
|
|
@ -344,7 +344,7 @@ xfs_dir2_block_to_sf(
|
|||
ASSERT(dp->i_df.if_bytes == 0);
|
||||
xfs_init_local_fork(dp, XFS_DATA_FORK, sfp, size);
|
||||
dp->i_df.if_format = XFS_DINODE_FMT_LOCAL;
|
||||
dp->i_d.di_size = size;
|
||||
dp->i_disk_size = size;
|
||||
|
||||
logflags |= XFS_ILOG_DDATA;
|
||||
xfs_dir2_sf_check(args);
|
||||
|
@ -367,7 +367,7 @@ xfs_dir2_sf_addname(
|
|||
xfs_inode_t *dp; /* incore directory inode */
|
||||
int error; /* error return value */
|
||||
int incr_isize; /* total change in size */
|
||||
int new_isize; /* di_size after adding name */
|
||||
int new_isize; /* size after adding name */
|
||||
int objchange; /* changing to 8-byte inodes */
|
||||
xfs_dir2_data_aoff_t offset = 0; /* offset for new entry */
|
||||
int pick; /* which algorithm to use */
|
||||
|
@ -378,12 +378,12 @@ xfs_dir2_sf_addname(
|
|||
|
||||
ASSERT(xfs_dir2_sf_lookup(args) == -ENOENT);
|
||||
dp = args->dp;
|
||||
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
|
||||
ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(dp->i_disk_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_disk_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
ASSERT(dp->i_disk_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
/*
|
||||
* Compute entry (and change in) size.
|
||||
*/
|
||||
|
@ -401,7 +401,7 @@ xfs_dir2_sf_addname(
|
|||
objchange = 1;
|
||||
}
|
||||
|
||||
new_isize = (int)dp->i_d.di_size + incr_isize;
|
||||
new_isize = (int)dp->i_disk_size + incr_isize;
|
||||
/*
|
||||
* Won't fit as shortform any more (due to size),
|
||||
* or the pick routine says it won't (due to offset values).
|
||||
|
@ -492,7 +492,7 @@ xfs_dir2_sf_addname_easy(
|
|||
sfp->count++;
|
||||
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM)
|
||||
sfp->i8count++;
|
||||
dp->i_d.di_size = new_isize;
|
||||
dp->i_disk_size = new_isize;
|
||||
xfs_dir2_sf_check(args);
|
||||
}
|
||||
|
||||
|
@ -519,7 +519,7 @@ xfs_dir2_sf_addname_hard(
|
|||
int nbytes; /* temp for byte copies */
|
||||
xfs_dir2_data_aoff_t new_offset; /* next offset value */
|
||||
xfs_dir2_data_aoff_t offset; /* current offset value */
|
||||
int old_isize; /* previous di_size */
|
||||
int old_isize; /* previous size */
|
||||
xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */
|
||||
xfs_dir2_sf_hdr_t *oldsfp; /* original shortform dir */
|
||||
xfs_dir2_sf_entry_t *sfep; /* entry in new dir */
|
||||
|
@ -529,7 +529,7 @@ xfs_dir2_sf_addname_hard(
|
|||
* Copy the old directory to the stack buffer.
|
||||
*/
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
old_isize = (int)dp->i_d.di_size;
|
||||
old_isize = (int)dp->i_disk_size;
|
||||
buf = kmem_alloc(old_isize, 0);
|
||||
oldsfp = (xfs_dir2_sf_hdr_t *)buf;
|
||||
memcpy(oldsfp, sfp, old_isize);
|
||||
|
@ -586,7 +586,7 @@ xfs_dir2_sf_addname_hard(
|
|||
memcpy(sfep, oldsfep, old_isize - nbytes);
|
||||
}
|
||||
kmem_free(buf);
|
||||
dp->i_d.di_size = new_isize;
|
||||
dp->i_disk_size = new_isize;
|
||||
xfs_dir2_sf_check(args);
|
||||
}
|
||||
|
||||
|
@ -697,7 +697,7 @@ xfs_dir2_sf_check(
|
|||
ASSERT(xfs_dir2_sf_get_ftype(mp, sfep) < XFS_DIR3_FT_MAX);
|
||||
}
|
||||
ASSERT(i8count == sfp->i8count);
|
||||
ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
|
||||
ASSERT((char *)sfep - (char *)sfp == dp->i_disk_size);
|
||||
ASSERT(offset +
|
||||
(sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
|
||||
(uint)sizeof(xfs_dir2_block_tail_t) <= args->geo->blksize);
|
||||
|
@ -821,18 +821,16 @@ xfs_dir2_sf_create(
|
|||
dp = args->dp;
|
||||
|
||||
ASSERT(dp != NULL);
|
||||
ASSERT(dp->i_d.di_size == 0);
|
||||
ASSERT(dp->i_disk_size == 0);
|
||||
/*
|
||||
* If it's currently a zero-length extent file,
|
||||
* convert it to local format.
|
||||
*/
|
||||
if (dp->i_df.if_format == XFS_DINODE_FMT_EXTENTS) {
|
||||
dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */
|
||||
dp->i_df.if_format = XFS_DINODE_FMT_LOCAL;
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
|
||||
dp->i_df.if_flags |= XFS_IFINLINE;
|
||||
}
|
||||
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
|
||||
ASSERT(dp->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(dp->i_df.if_bytes == 0);
|
||||
i8count = pino > XFS_DIR2_MAX_SHORT_INUM;
|
||||
size = xfs_dir2_sf_hdr_size(i8count);
|
||||
|
@ -850,7 +848,7 @@ xfs_dir2_sf_create(
|
|||
*/
|
||||
xfs_dir2_sf_put_parent_ino(sfp, pino);
|
||||
sfp->count = 0;
|
||||
dp->i_d.di_size = size;
|
||||
dp->i_disk_size = size;
|
||||
xfs_dir2_sf_check(args);
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
|
||||
return 0;
|
||||
|
@ -877,12 +875,12 @@ xfs_dir2_sf_lookup(
|
|||
|
||||
xfs_dir2_sf_check(args);
|
||||
|
||||
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
|
||||
ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(dp->i_disk_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_disk_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
ASSERT(dp->i_disk_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
/*
|
||||
* Special case for .
|
||||
*/
|
||||
|
@ -954,8 +952,8 @@ xfs_dir2_sf_removename(
|
|||
|
||||
trace_xfs_dir2_sf_removename(args);
|
||||
|
||||
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
|
||||
oldsize = (int)dp->i_d.di_size;
|
||||
ASSERT(dp->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
oldsize = (int)dp->i_disk_size;
|
||||
ASSERT(oldsize >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == oldsize);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
|
@ -995,7 +993,7 @@ xfs_dir2_sf_removename(
|
|||
* Fix up the header and file size.
|
||||
*/
|
||||
sfp->count--;
|
||||
dp->i_d.di_size = newsize;
|
||||
dp->i_disk_size = newsize;
|
||||
/*
|
||||
* Reallocate, making it smaller.
|
||||
*/
|
||||
|
@ -1053,12 +1051,12 @@ xfs_dir2_sf_replace(
|
|||
|
||||
trace_xfs_dir2_sf_replace(args);
|
||||
|
||||
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
|
||||
ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(dp->i_disk_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_disk_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
ASSERT(dp->i_disk_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
|
||||
|
||||
/*
|
||||
* New inode number is large, and need to convert to 8-byte inodes.
|
||||
|
@ -1219,7 +1217,7 @@ xfs_dir2_sf_toino4(
|
|||
* Clean up the inode.
|
||||
*/
|
||||
kmem_free(buf);
|
||||
dp->i_d.di_size = newsize;
|
||||
dp->i_disk_size = newsize;
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
|
||||
}
|
||||
|
||||
|
@ -1292,6 +1290,6 @@ xfs_dir2_sf_toino8(
|
|||
* Clean up the inode.
|
||||
*/
|
||||
kmem_free(buf);
|
||||
dp->i_d.di_size = newsize;
|
||||
dp->i_disk_size = newsize;
|
||||
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
|
||||
}
|
||||
|
|
|
@@ -58,7 +58,8 @@
 #define XFS_ERRTAG_BUF_IOERROR				35
 #define XFS_ERRTAG_REDUCE_MAX_IEXTENTS			36
 #define XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT		37
-#define XFS_ERRTAG_MAX					38
+#define XFS_ERRTAG_AG_RESV_FAIL				38
+#define XFS_ERRTAG_MAX					39

 /*
  * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -101,5 +102,6 @@
 #define XFS_RANDOM_BUF_IOERROR				XFS_RANDOM_DEFAULT
 #define XFS_RANDOM_REDUCE_MAX_IEXTENTS			1
 #define XFS_RANDOM_BMAP_ALLOC_MINLEN_EXTENT		1
+#define XFS_RANDOM_AG_RESV_FAIL				1

 #endif /* __XFS_ERRORTAG_H_ */

@@ -955,9 +955,8 @@ static inline time64_t xfs_bigtime_to_unix(uint64_t ondisk_seconds)
  * attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros
  * below.
  *
- * There is a very similar struct icdinode in xfs_inode which matches the
- * layout of the first 96 bytes of this structure, but is kept in native
- * format instead of big endian.
+ * There is a very similar struct xfs_log_dinode which matches the layout of
+ * this structure, but is kept in native format instead of big endian.
  *
  * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
  * padding field for v3 inodes.

@@ -65,7 +65,7 @@ struct getbmapx {

 /* bmv_iflags values - set by XFS_IOC_GETBMAPX caller. */
 #define BMV_IF_ATTRFORK		0x1	/* return attr fork rather than data */
-#define BMV_IF_NO_DMAPI_READ	0x2	/* Do not generate DMAPI read event */
+#define BMV_IF_NO_DMAPI_READ	0x2	/* Deprecated */
 #define BMV_IF_PREALLOC		0x4	/* rtn status BMV_OF_PREALLOC if req */
 #define BMV_IF_DELALLOC		0x8	/* rtn status BMV_OF_DELALLOC if req */
 #define BMV_IF_NO_HOLES		0x10	/* Do not return holes */

@@ -2535,12 +2535,12 @@ xfs_agi_verify(
		return __this_address;

	if (be32_to_cpu(agi->agi_level) < 1 ||
-	    be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
+	    be32_to_cpu(agi->agi_level) > M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    (be32_to_cpu(agi->agi_free_level) < 1 ||
-	     be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
+	     be32_to_cpu(agi->agi_free_level) > M_IGEO(mp)->inobt_maxlevels))
		return __this_address;

	/*

@@ -8,9 +8,9 @@
 #include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log_format.h"
-#include "xfs_inode.h"
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
 #include "xfs_trace.h"

 /*

@@ -124,37 +124,18 @@ const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
 /*
  * This routine is called to map an inode to the buffer containing the on-disk
  * version of the inode. It returns a pointer to the buffer containing the
- * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
- * pointer to the on-disk inode within that buffer.
- *
- * If a non-zero error is returned, then the contents of bpp and dipp are
- * undefined.
+ * on-disk inode in the bpp parameter.
  */
 int
 xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
-	struct xfs_dinode	**dipp,
-	struct xfs_buf		**bpp,
-	uint			buf_flags)
+	struct xfs_buf		**bpp)
 {
-	struct xfs_buf		*bp;
-	int			error;
-
-	buf_flags |= XBF_UNMAPPED;
-	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
-			(int)imap->im_len, buf_flags, &bp,
+	return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
+			imap->im_len, XBF_UNMAPPED, bpp,
			&xfs_inode_buf_ops);
-	if (error) {
-		ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK));
-		return error;
-	}
-
-	*bpp = bp;
-	if (dipp)
-		*dipp = xfs_buf_offset(bp, imap->im_boffset);
-	return 0;
 }

 static inline struct timespec64 xfs_inode_decode_bigtime(uint64_t ts)
@ -192,7 +173,6 @@ xfs_inode_from_disk(
|
|||
struct xfs_inode *ip,
|
||||
struct xfs_dinode *from)
|
||||
{
|
||||
struct xfs_icdinode *to = &ip->i_d;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
int error;
|
||||
xfs_failaddr_t fa;
|
||||
|
@ -212,7 +192,8 @@ xfs_inode_from_disk(
|
|||
* inode. If the inode is unused, mode is zero and we shouldn't mess
|
||||
* with the uninitialized part of it.
|
||||
*/
|
||||
to->di_flushiter = be16_to_cpu(from->di_flushiter);
|
||||
if (!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb))
|
||||
ip->i_flushiter = be16_to_cpu(from->di_flushiter);
|
||||
inode->i_generation = be32_to_cpu(from->di_gen);
|
||||
inode->i_mode = be16_to_cpu(from->di_mode);
|
||||
if (!inode->i_mode)
|
||||
|
@ -225,10 +206,10 @@ xfs_inode_from_disk(
|
|||
*/
|
||||
if (unlikely(from->di_version == 1)) {
|
||||
set_nlink(inode, be16_to_cpu(from->di_onlink));
|
||||
to->di_projid = 0;
|
||||
ip->i_projid = 0;
|
||||
} else {
|
||||
set_nlink(inode, be32_to_cpu(from->di_nlink));
|
||||
to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
|
||||
ip->i_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
|
||||
be16_to_cpu(from->di_projid_lo);
|
||||
}
|
||||
|
||||
|
@ -245,20 +226,21 @@ xfs_inode_from_disk(
|
|||
inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
|
||||
inode->i_ctime = xfs_inode_from_disk_ts(from, from->di_ctime);
|
||||
|
||||
to->di_size = be64_to_cpu(from->di_size);
|
||||
to->di_nblocks = be64_to_cpu(from->di_nblocks);
|
||||
to->di_extsize = be32_to_cpu(from->di_extsize);
|
||||
to->di_forkoff = from->di_forkoff;
|
||||
to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
|
||||
to->di_dmstate = be16_to_cpu(from->di_dmstate);
|
||||
to->di_flags = be16_to_cpu(from->di_flags);
|
||||
ip->i_disk_size = be64_to_cpu(from->di_size);
|
||||
ip->i_nblocks = be64_to_cpu(from->di_nblocks);
|
||||
ip->i_extsize = be32_to_cpu(from->di_extsize);
|
||||
ip->i_forkoff = from->di_forkoff;
|
||||
ip->i_diflags = be16_to_cpu(from->di_flags);
|
||||
|
||||
if (from->di_dmevmask || from->di_dmstate)
|
||||
xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);
|
||||
|
||||
if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
|
||||
inode_set_iversion_queried(inode,
|
||||
be64_to_cpu(from->di_changecount));
|
||||
to->di_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
|
||||
to->di_flags2 = be64_to_cpu(from->di_flags2);
|
||||
to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
|
||||
ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
|
||||
ip->i_diflags2 = be64_to_cpu(from->di_flags2);
|
||||
ip->i_cowextsize = be32_to_cpu(from->di_cowextsize);
|
||||
}
|
||||
|
||||
error = xfs_iformat_data_fork(ip, from);
|
||||
|
@ -303,7 +285,6 @@ xfs_inode_to_disk(
|
|||
struct xfs_dinode *to,
|
||||
xfs_lsn_t lsn)
|
||||
{
|
||||
struct xfs_icdinode *from = &ip->i_d;
|
||||
struct inode *inode = VFS_I(ip);
|
||||
|
||||
to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
|
||||
|
@ -312,8 +293,8 @@ xfs_inode_to_disk(
|
|||
to->di_format = xfs_ifork_format(&ip->i_df);
|
||||
to->di_uid = cpu_to_be32(i_uid_read(inode));
|
||||
to->di_gid = cpu_to_be32(i_gid_read(inode));
|
||||
to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
|
||||
to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);
|
||||
to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
|
||||
to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);
|
||||
|
||||
memset(to->di_pad, 0, sizeof(to->di_pad));
|
||||
to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
|
||||
|
@ -323,23 +304,21 @@ xfs_inode_to_disk(
|
|||
to->di_gen = cpu_to_be32(inode->i_generation);
|
||||
to->di_mode = cpu_to_be16(inode->i_mode);
|
||||
|
||||
to->di_size = cpu_to_be64(from->di_size);
|
||||
to->di_nblocks = cpu_to_be64(from->di_nblocks);
|
||||
to->di_extsize = cpu_to_be32(from->di_extsize);
|
||||
to->di_size = cpu_to_be64(ip->i_disk_size);
|
||||
to->di_nblocks = cpu_to_be64(ip->i_nblocks);
|
||||
to->di_extsize = cpu_to_be32(ip->i_extsize);
|
||||
to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
|
||||
to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
|
||||
to->di_forkoff = from->di_forkoff;
|
||||
to->di_forkoff = ip->i_forkoff;
|
||||
to->di_aformat = xfs_ifork_format(ip->i_afp);
|
||||
to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
|
||||
to->di_dmstate = cpu_to_be16(from->di_dmstate);
|
||||
to->di_flags = cpu_to_be16(from->di_flags);
|
||||
to->di_flags = cpu_to_be16(ip->i_diflags);
|
||||
|
||||
if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
|
||||
to->di_version = 3;
|
||||
to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
|
||||
to->di_crtime = xfs_inode_to_disk_ts(ip, from->di_crtime);
|
||||
to->di_flags2 = cpu_to_be64(from->di_flags2);
|
||||
to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
|
||||
to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime);
|
||||
to->di_flags2 = cpu_to_be64(ip->i_diflags2);
|
||||
to->di_cowextsize = cpu_to_be32(ip->i_cowextsize);
|
||||
to->di_ino = cpu_to_be64(ip->i_ino);
|
||||
to->di_lsn = cpu_to_be64(lsn);
|
||||
memset(to->di_pad2, 0, sizeof(to->di_pad2));
|
||||
|
@ -347,7 +326,7 @@ xfs_inode_to_disk(
|
|||
to->di_flushiter = 0;
|
||||
} else {
|
||||
to->di_version = 2;
|
||||
to->di_flushiter = cpu_to_be16(from->di_flushiter);
|
||||
to->di_flushiter = cpu_to_be16(ip->i_flushiter);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -9,34 +9,6 @@
|
|||
struct xfs_inode;
|
||||
struct xfs_dinode;
|
||||
|
||||
/*
|
||||
* In memory representation of the XFS inode. This is held in the in-core struct
|
||||
* xfs_inode and represents the current on disk values but the structure is not
|
||||
* in on-disk format. That is, this structure is always translated to on-disk
|
||||
* format specific structures at the appropriate time.
|
||||
*/
|
||||
struct xfs_icdinode {
|
||||
uint16_t di_flushiter; /* incremented on flush */
|
||||
prid_t di_projid; /* owner's project id */
|
||||
xfs_fsize_t di_size; /* number of bytes in file */
|
||||
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
|
||||
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
|
||||
uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */
|
||||
uint32_t di_dmevmask; /* DMIG event mask */
|
||||
uint16_t di_dmstate; /* DMIG state info */
|
||||
uint16_t di_flags; /* random flags, XFS_DIFLAG_... */
|
||||
|
||||
uint64_t di_flags2; /* more random flags */
|
||||
uint32_t di_cowextsize; /* basic cow extent size for file */
|
||||
|
||||
struct timespec64 di_crtime; /* time created */
|
||||
};
|
||||
|
||||
static inline bool xfs_icdinode_has_bigtime(const struct xfs_icdinode *icd)
|
||||
{
|
||||
return icd->di_flags2 & XFS_DIFLAG2_BIGTIME;
|
||||
}
|
||||
|
||||
/*
|
||||
* Inode location information. Stored in the inode and passed to
|
||||
* xfs_imap_to_bp() to get a buffer and dinode for a given inode.
|
||||
|
@ -47,9 +19,8 @@ struct xfs_imap {
|
|||
unsigned short im_boffset; /* inode offset in block in bytes */
|
||||
};
|
||||
|
||||
int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
|
||||
struct xfs_imap *, struct xfs_dinode **,
|
||||
struct xfs_buf **, uint);
|
||||
int xfs_imap_to_bp(struct xfs_mount *mp, struct xfs_trans *tp,
|
||||
struct xfs_imap *imap, struct xfs_buf **bpp);
|
||||
void xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *);
|
||||
void xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to,
|
||||
xfs_lsn_t lsn);
|
||||
|
|
|
@ -60,8 +60,6 @@ xfs_init_local_fork(
|
|||
}
|
||||
|
||||
ifp->if_bytes = size;
|
||||
ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
|
||||
ifp->if_flags |= XFS_IFINLINE;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -151,7 +149,6 @@ xfs_iformat_extents(
|
|||
xfs_iext_next(ifp, &icur);
|
||||
}
|
||||
}
|
||||
ifp->if_flags |= XFS_IFEXTENTS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -194,8 +191,8 @@ xfs_iformat_btree(
|
|||
nrecs == 0 ||
|
||||
XFS_BMDR_SPACE_CALC(nrecs) >
|
||||
XFS_DFORK_SIZE(dip, mp, whichfork) ||
|
||||
ifp->if_nextents > ip->i_d.di_nblocks) ||
|
||||
level == 0 || level > XFS_BTREE_MAXLEVELS) {
|
||||
ifp->if_nextents > ip->i_nblocks) ||
|
||||
level == 0 || level > XFS_BM_MAXLEVELS(mp, whichfork)) {
|
||||
xfs_warn(mp, "corrupt inode %Lu (btree).",
|
||||
(unsigned long long) ip->i_ino);
|
||||
xfs_inode_verifier_error(ip, -EFSCORRUPTED,
|
||||
|
@ -213,8 +210,6 @@ xfs_iformat_btree(
|
|||
*/
|
||||
xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
|
||||
ifp->if_broot, size);
|
||||
ifp->if_flags &= ~XFS_IFEXTENTS;
|
||||
ifp->if_flags |= XFS_IFBROOT;
|
||||
|
||||
ifp->if_bytes = 0;
|
||||
ifp->if_u1.if_root = NULL;
|
||||
|
@ -242,7 +237,7 @@ xfs_iformat_data_fork(
|
|||
case S_IFCHR:
|
||||
case S_IFBLK:
|
||||
case S_IFSOCK:
|
||||
ip->i_d.di_size = 0;
|
||||
ip->i_disk_size = 0;
|
||||
inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
|
||||
return 0;
|
||||
case S_IFREG:
|
||||
|
@ -282,6 +277,19 @@ xfs_dfork_attr_shortform_size(
|
|||
return be16_to_cpu(atp->hdr.totsize);
|
||||
}
|
||||
|
||||
struct xfs_ifork *
|
||||
xfs_ifork_alloc(
|
||||
enum xfs_dinode_fmt format,
|
||||
xfs_extnum_t nextents)
|
||||
{
|
||||
struct xfs_ifork *ifp;
|
||||
|
||||
ifp = kmem_cache_zalloc(xfs_ifork_zone, GFP_NOFS | __GFP_NOFAIL);
|
||||
ifp->if_format = format;
|
||||
ifp->if_nextents = nextents;
|
||||
return ifp;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_iformat_attr_fork(
|
||||
struct xfs_inode *ip,
|
||||
|
@ -293,11 +301,8 @@ xfs_iformat_attr_fork(
|
|||
* Initialize the extent count early, as the per-format routines may
|
||||
* depend on it.
|
||||
*/
|
||||
ip->i_afp = kmem_cache_zalloc(xfs_ifork_zone, GFP_NOFS | __GFP_NOFAIL);
|
||||
ip->i_afp->if_format = dip->di_aformat;
|
||||
if (unlikely(ip->i_afp->if_format == 0)) /* pre IRIX 6.2 file system */
|
||||
ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
ip->i_afp->if_nextents = be16_to_cpu(dip->di_anextents);
|
||||
ip->i_afp = xfs_ifork_alloc(dip->di_aformat,
|
||||
be16_to_cpu(dip->di_anextents));
|
||||
|
||||
switch (ip->i_afp->if_format) {
|
||||
case XFS_DINODE_FMT_LOCAL:
|
||||
|
@ -423,7 +428,6 @@ xfs_iroot_realloc(
|
|||
XFS_BMBT_BLOCK_LEN(ip->i_mount));
|
||||
} else {
|
||||
new_broot = NULL;
|
||||
ifp->if_flags &= ~XFS_IFBROOT;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -512,17 +516,16 @@ xfs_idestroy_fork(
|
|||
ifp->if_broot = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the format is local, then we can't have an extents array so just
|
||||
* look for an inline data array. If we're not local then we may or may
|
||||
* not have an extents list, so check and free it up if we do.
|
||||
*/
|
||||
if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
switch (ifp->if_format) {
|
||||
case XFS_DINODE_FMT_LOCAL:
|
||||
kmem_free(ifp->if_u1.if_data);
|
||||
ifp->if_u1.if_data = NULL;
|
||||
} else if (ifp->if_flags & XFS_IFEXTENTS) {
|
||||
break;
|
||||
case XFS_DINODE_FMT_EXTENTS:
|
||||
case XFS_DINODE_FMT_BTREE:
|
||||
if (ifp->if_height)
|
||||
xfs_iext_destroy(ifp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -616,8 +619,6 @@ xfs_iflush_fork(
|
|||
break;
|
||||
|
||||
case XFS_DINODE_FMT_EXTENTS:
|
||||
ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
|
||||
!(iip->ili_fields & extflag[whichfork]));
|
||||
if ((iip->ili_fields & extflag[whichfork]) &&
|
||||
(ifp->if_bytes > 0)) {
|
||||
ASSERT(ifp->if_nextents > 0);
|
||||
|
@ -677,7 +678,6 @@ xfs_ifork_init_cow(
|
|||
|
||||
ip->i_cowfp = kmem_cache_zalloc(xfs_ifork_zone,
|
||||
GFP_NOFS | __GFP_NOFAIL);
|
||||
ip->i_cowfp->if_flags = XFS_IFEXTENTS;
|
||||
ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
}
|
||||
|
||||
|
|
|
@ -22,18 +22,10 @@ struct xfs_ifork {
|
|||
char *if_data; /* inline file data */
|
||||
} if_u1;
|
||||
short if_broot_bytes; /* bytes allocated for root */
|
||||
unsigned char if_flags; /* per-fork flags */
|
||||
int8_t if_format; /* format of this fork */
|
||||
xfs_extnum_t if_nextents; /* # of extents in this fork */
|
||||
};
|
||||
|
||||
/*
|
||||
* Per-fork incore inode flags.
|
||||
*/
|
||||
#define XFS_IFINLINE 0x01 /* Inline data is read in */
|
||||
#define XFS_IFEXTENTS 0x02 /* All extent pointers are read in */
|
||||
#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */
|
||||
|
||||
/*
|
||||
* Worst-case increase in the fork extent count when we're adding a single
|
||||
* extent to a fork and there's no possibility of splitting an existing mapping.
|
||||
|
@ -99,8 +91,8 @@ struct xfs_ifork {
|
|||
* Fork handling.
|
||||
*/
|
||||
|
||||
#define XFS_IFORK_Q(ip) ((ip)->i_d.di_forkoff != 0)
|
||||
#define XFS_IFORK_BOFF(ip) ((int)((ip)->i_d.di_forkoff << 3))
|
||||
#define XFS_IFORK_Q(ip) ((ip)->i_forkoff != 0)
|
||||
#define XFS_IFORK_BOFF(ip) ((int)((ip)->i_forkoff << 3))
|
||||
|
||||
#define XFS_IFORK_PTR(ip,w) \
|
||||
((w) == XFS_DATA_FORK ? \
|
||||
|
@ -141,6 +133,8 @@ static inline int8_t xfs_ifork_format(struct xfs_ifork *ifp)
|
|||
return ifp->if_format;
|
||||
}
|
||||
|
||||
struct xfs_ifork *xfs_ifork_alloc(enum xfs_dinode_fmt format,
|
||||
xfs_extnum_t nextents);
|
||||
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
|
||||
|
||||
int xfs_iformat_data_fork(struct xfs_inode *, struct xfs_dinode *);
|
||||
|
@ -236,4 +230,10 @@ int xfs_ifork_verify_local_attr(struct xfs_inode *ip);
|
|||
int xfs_iext_count_may_overflow(struct xfs_inode *ip, int whichfork,
|
||||
int nr_to_add);
|
||||
|
||||
/* returns true if the fork has extents but they are not read in yet. */
|
||||
static inline bool xfs_need_iread_extents(struct xfs_ifork *ifp)
|
||||
{
|
||||
return ifp->if_format == XFS_DINODE_FMT_BTREE && ifp->if_height == 0;
|
||||
}
|
||||
|
||||
#endif /* __XFS_INODE_FORK_H__ */
|
||||
|
|
|
@@ -997,8 +997,8 @@ xfs_rtfree_extent(
	 */
	if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
	    mp->m_sb.sb_rextents) {
-		if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
-			mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+		if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM))
+			mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
		*(uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
		xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
	}

@ -176,8 +176,12 @@ struct xfs_ino_geometry {
|
|||
|
||||
unsigned int agino_log; /* #bits for agino in inum */
|
||||
|
||||
/* precomputed default inode attribute fork offset */
|
||||
unsigned int attr_fork_offset;
|
||||
|
||||
/* precomputed value for di_flags2 */
|
||||
uint64_t new_diflags2;
|
||||
|
||||
};
|
||||
|
||||
#endif /* __XFS_SHARED_H__ */
|
||||
|
|
|
@@ -70,7 +70,7 @@ xfs_trans_ichgtime(
	if (flags & XFS_ICHGTIME_CHG)
		inode->i_ctime = tv;
	if (flags & XFS_ICHGTIME_CREATE)
-		ip->i_d.di_crtime = tv;
+		ip->i_crtime = tv;
 }

 /*
@@ -138,7 +138,7 @@ xfs_trans_log_inode(
	if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) &&
	    xfs_sb_version_hasbigtime(&ip->i_mount->m_sb) &&
	    !xfs_inode_has_bigtime(ip)) {
-		ip->i_d.di_flags2 |= XFS_DIFLAG2_BIGTIME;
+		ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME;
		flags |= XFS_ILOG_CORE;
	}

@@ -164,8 +164,7 @@ xfs_trans_log_inode(
	 * here.
	 */
	spin_unlock(&iip->ili_lock);
-	error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, NULL,
-			&bp, 0);
+	error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp);
	if (error) {
		xfs_force_shutdown(ip->i_mount, SHUTDOWN_META_IO_ERROR);
		return;

@@ -13,7 +13,7 @@
 #include "xfs_mount.h"

 /* Find the size of the AG, in blocks. */
-xfs_agblock_t
+inline xfs_agblock_t
 xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
@ -29,7 +29,7 @@ xfs_ag_block_count(
|
|||
* Verify that an AG block number pointer neither points outside the AG
|
||||
* nor points at static metadata.
|
||||
*/
|
||||
bool
|
||||
inline bool
|
||||
xfs_verify_agbno(
|
||||
struct xfs_mount *mp,
|
||||
xfs_agnumber_t agno,
|
||||
|
@ -49,7 +49,7 @@ xfs_verify_agbno(
|
|||
* Verify that an FS block number pointer neither points outside the
|
||||
* filesystem nor points at static AG metadata.
|
||||
*/
|
||||
bool
|
||||
inline bool
|
||||
xfs_verify_fsbno(
|
||||
struct xfs_mount *mp,
|
||||
xfs_fsblock_t fsbno)
|
||||
|
@ -85,7 +85,7 @@ xfs_verify_fsbext(
|
|||
}
|
||||
|
||||
/* Calculate the first and last possible inode number in an AG. */
|
||||
void
|
||||
inline void
|
||||
xfs_agino_range(
|
||||
struct xfs_mount *mp,
|
||||
xfs_agnumber_t agno,
|
||||
|
@ -116,7 +116,7 @@ xfs_agino_range(
|
|||
* Verify that an AG inode number pointer neither points outside the AG
|
||||
* nor points at static metadata.
|
||||
*/
|
||||
bool
|
||||
inline bool
|
||||
xfs_verify_agino(
|
||||
struct xfs_mount *mp,
|
||||
xfs_agnumber_t agno,
|
||||
|
@ -146,7 +146,7 @@ xfs_verify_agino_or_null(
|
|||
* Verify that an FS inode number pointer neither points outside the
|
||||
* filesystem nor points at static AG metadata.
|
||||
*/
|
||||
bool
|
||||
inline bool
|
||||
xfs_verify_ino(
|
||||
struct xfs_mount *mp,
|
||||
xfs_ino_t ino)
|
||||
|
@ -162,7 +162,7 @@ xfs_verify_ino(
|
|||
}
|
||||
|
||||
/* Is this an internal inode number? */
|
||||
bool
|
||||
inline bool
|
||||
xfs_internal_inum(
|
||||
struct xfs_mount *mp,
|
||||
xfs_ino_t ino)
|
||||
|
@ -190,7 +190,7 @@ xfs_verify_dir_ino(
|
|||
* Verify that an realtime block number pointer doesn't point off the
|
||||
* end of the realtime device.
|
||||
*/
|
||||
bool
|
||||
inline bool
|
||||
xfs_verify_rtbno(
|
||||
struct xfs_mount *mp,
|
||||
xfs_rtblock_t rtbno)
|
||||
|
@ -215,7 +215,7 @@ xfs_verify_rtext(
|
|||
}
|
||||
|
||||
/* Calculate the range of valid icount values. */
|
||||
void
|
||||
inline void
|
||||
xfs_icount_range(
|
||||
struct xfs_mount *mp,
|
||||
unsigned long long *min,
|
||||
|
|
|
@ -477,16 +477,13 @@ xchk_agf_xref(
|
|||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
xfs_agblock_t agbno;
|
||||
int error;
|
||||
|
||||
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
return;
|
||||
|
||||
agbno = XFS_AGF_BLOCK(mp);
|
||||
|
||||
error = xchk_ag_btcur_init(sc, &sc->sa);
|
||||
if (error)
|
||||
return;
|
||||
xchk_ag_btcur_init(sc, &sc->sa);
|
||||
|
||||
xchk_xref_is_used_space(sc, agbno, 1);
|
||||
xchk_agf_xref_freeblks(sc);
|
||||
|
@ -508,7 +505,7 @@ xchk_agf(
|
|||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_agf *agf;
|
||||
struct xfs_perag *pag;
|
||||
xfs_agnumber_t agno;
|
||||
xfs_agnumber_t agno = sc->sm->sm_agno;
|
||||
xfs_agblock_t agbno;
|
||||
xfs_agblock_t eoag;
|
||||
xfs_agblock_t agfl_first;
|
||||
|
@ -518,9 +515,7 @@ xchk_agf(
|
|||
int level;
|
||||
int error = 0;
|
||||
|
||||
agno = sc->sa.agno = sc->sm->sm_agno;
|
||||
error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
|
||||
&sc->sa.agf_bp, &sc->sa.agfl_bp);
|
||||
error = xchk_ag_read_headers(sc, agno, &sc->sa);
|
||||
if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
|
||||
goto out;
|
||||
xchk_buffer_recheck(sc, sc->sa.agf_bp);
|
||||
|
@ -662,16 +657,13 @@ xchk_agfl_xref(
|
|||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
xfs_agblock_t agbno;
|
||||
int error;
|
||||
|
||||
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
return;
|
||||
|
||||
agbno = XFS_AGFL_BLOCK(mp);
|
||||
|
||||
error = xchk_ag_btcur_init(sc, &sc->sa);
|
||||
if (error)
|
||||
return;
|
||||
xchk_ag_btcur_init(sc, &sc->sa);
|
||||
|
||||
xchk_xref_is_used_space(sc, agbno, 1);
|
||||
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
|
||||
|
@ -691,14 +683,12 @@ xchk_agfl(
|
|||
{
|
||||
struct xchk_agfl_info sai;
|
||||
struct xfs_agf *agf;
|
||||
xfs_agnumber_t agno;
|
||||
xfs_agnumber_t agno = sc->sm->sm_agno;
|
||||
unsigned int agflcount;
|
||||
unsigned int i;
|
||||
int error;
|
||||
|
||||
agno = sc->sa.agno = sc->sm->sm_agno;
|
||||
error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
|
||||
&sc->sa.agf_bp, &sc->sa.agfl_bp);
|
||||
error = xchk_ag_read_headers(sc, agno, &sc->sa);
|
||||
if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
|
||||
goto out;
|
||||
if (!sc->sa.agf_bp)
|
||||
|
@ -817,16 +807,13 @@ xchk_agi_xref(
|
|||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
xfs_agblock_t agbno;
|
||||
int error;
|
||||
|
||||
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
|
||||
return;
|
||||
|
||||
agbno = XFS_AGI_BLOCK(mp);
|
||||
|
||||
error = xchk_ag_btcur_init(sc, &sc->sa);
|
||||
if (error)
|
||||
return;
|
||||
xchk_ag_btcur_init(sc, &sc->sa);
|
||||
|
||||
xchk_xref_is_used_space(sc, agbno, 1);
|
||||
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
|
||||
|
@ -846,7 +833,7 @@ xchk_agi(
|
|||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_agi *agi;
|
||||
struct xfs_perag *pag;
|
||||
xfs_agnumber_t agno;
|
||||
xfs_agnumber_t agno = sc->sm->sm_agno;
|
||||
xfs_agblock_t agbno;
|
||||
xfs_agblock_t eoag;
|
||||
xfs_agino_t agino;
|
||||
|
@ -857,9 +844,7 @@ xchk_agi(
|
|||
int level;
|
||||
int error = 0;
|
||||
|
||||
agno = sc->sa.agno = sc->sm->sm_agno;
|
||||
error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
|
||||
&sc->sa.agf_bp, &sc->sa.agfl_bp);
|
||||
error = xchk_ag_read_headers(sc, agno, &sc->sa);
|
||||
if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
|
||||
goto out;
|
||||
xchk_buffer_recheck(sc, sc->sa.agi_bp);
|
||||
|
|
|
@ -21,10 +21,9 @@
|
|||
*/
|
||||
int
|
||||
xchk_setup_ag_allocbt(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xchk_setup_ag_btree(sc, ip, false);
|
||||
return xchk_setup_ag_btree(sc, false);
|
||||
}
|
||||
|
||||
/* Free space btree scrubber. */
|
||||
|
|
|
@ -69,8 +69,7 @@ xchk_setup_xattr_buf(
|
|||
/* Set us up to scrub an inode's extended attributes. */
|
||||
int
|
||||
xchk_setup_xattr(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
int error;
|
||||
|
||||
|
@ -85,7 +84,7 @@ xchk_setup_xattr(
|
|||
return error;
|
||||
}
|
||||
|
||||
return xchk_setup_inode_contents(sc, ip, 0);
|
||||
return xchk_setup_inode_contents(sc, 0);
|
||||
}
|
||||
|
||||
/* Extended Attributes */
|
||||
|
|
|
@ -26,12 +26,11 @@
|
|||
/* Set us up with an inode's bmap. */
|
||||
int
|
||||
xchk_setup_inode_bmap(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xchk_get_inode(sc, ip);
|
||||
error = xchk_get_inode(sc);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
|
@ -448,12 +447,11 @@ xchk_bmap_btree(
|
|||
int error;
|
||||
|
||||
/* Load the incore bmap cache if it's not loaded. */
|
||||
info->was_loaded = ifp->if_flags & XFS_IFEXTENTS;
|
||||
if (!info->was_loaded) {
|
||||
error = xfs_iread_extents(sc->tp, ip, whichfork);
|
||||
if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
|
||||
goto out;
|
||||
}
|
||||
info->was_loaded = !xfs_need_iread_extents(ifp);
|
||||
|
||||
error = xfs_iread_extents(sc->tp, ip, whichfork);
|
||||
if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
|
||||
goto out;
|
||||
|
||||
/* Check the btree structure. */
|
||||
cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
|
||||
|
@ -675,10 +673,6 @@ xchk_bmap(
|
|||
/* No mappings to check. */
|
||||
goto out;
|
||||
case XFS_DINODE_FMT_EXTENTS:
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
xchk_fblock_set_corrupt(sc, whichfork, 0);
|
||||
goto out;
|
||||
}
|
||||
break;
|
||||
case XFS_DINODE_FMT_BTREE:
|
||||
if (whichfork == XFS_COW_FORK) {
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include "xfs_format.h"
|
||||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "scrub/scrub.h"
|
||||
#include "scrub/common.h"
|
||||
|
@ -442,6 +443,30 @@ xchk_btree_check_owner(
|
|||
return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
|
||||
}
|
||||
|
||||
/* Decide if we want to check minrecs of a btree block in the inode root. */
|
||||
static inline bool
|
||||
xchk_btree_check_iroot_minrecs(
|
||||
struct xchk_btree *bs)
|
||||
{
|
||||
/*
|
||||
* xfs_bmap_add_attrfork_btree had an implementation bug wherein it
|
||||
* would miscalculate the space required for the data fork bmbt root
|
||||
* when adding an attr fork, and promote the iroot contents to an
|
||||
* external block unnecessarily. This went unnoticed for many years
|
||||
* until scrub found filesystems in this state. Inode rooted btrees are
|
||||
* not supposed to have immediate child blocks that are small enough
|
||||
* that the contents could fit in the inode root, but we can't fail
|
||||
* existing filesystems, so instead we disable the check for data fork
|
||||
* bmap btrees when there's an attr fork.
|
||||
*/
|
||||
if (bs->cur->bc_btnum == XFS_BTNUM_BMAP &&
|
||||
bs->cur->bc_ino.whichfork == XFS_DATA_FORK &&
|
||||
XFS_IFORK_Q(bs->sc->ip))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that this btree block has at least minrecs records or is one of the
|
||||
* special blocks that don't require that.
|
||||
|
@ -475,8 +500,9 @@ xchk_btree_check_minrecs(
|
|||
|
||||
root_block = xfs_btree_get_block(cur, root_level, &root_bp);
|
||||
root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
|
||||
if (be16_to_cpu(root_block->bb_numrecs) != 1 ||
|
||||
numrecs <= root_maxrecs)
|
||||
if (xchk_btree_check_iroot_minrecs(bs) &&
|
||||
(be16_to_cpu(root_block->bb_numrecs) != 1 ||
|
||||
numrecs <= root_maxrecs))
|
||||
xchk_btree_set_corrupt(bs->sc, cur, level);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -402,22 +402,22 @@ int
|
|||
xchk_ag_read_headers(
|
||||
struct xfs_scrub *sc,
|
||||
xfs_agnumber_t agno,
|
||||
struct xfs_buf **agi,
|
||||
struct xfs_buf **agf,
|
||||
struct xfs_buf **agfl)
|
||||
struct xchk_ag *sa)
|
||||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
int error;
|
||||
|
||||
error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
|
||||
sa->agno = agno;
|
||||
|
||||
error = xfs_ialloc_read_agi(mp, sc->tp, agno, &sa->agi_bp);
|
||||
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
|
||||
goto out;
|
||||
|
||||
error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
|
||||
error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &sa->agf_bp);
|
||||
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
|
||||
goto out;
|
||||
|
||||
error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
|
||||
error = xfs_alloc_read_agfl(mp, sc->tp, agno, &sa->agfl_bp);
|
||||
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
|
||||
goto out;
|
||||
error = 0;
|
||||
|
@ -452,7 +452,7 @@ xchk_ag_btcur_free(
|
|||
}
|
||||
|
||||
/* Initialize all the btree cursors for an AG. */
|
||||
int
|
||||
void
|
||||
xchk_ag_btcur_init(
|
||||
struct xfs_scrub *sc,
|
||||
struct xchk_ag *sa)
|
||||
|
@ -502,8 +502,6 @@ xchk_ag_btcur_init(
|
|||
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
|
||||
sa->agf_bp, agno);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Release the AG header context and btree cursors. */
|
||||
|
@ -547,13 +545,12 @@ xchk_ag_init(
|
|||
{
|
||||
int error;
|
||||
|
||||
sa->agno = agno;
|
||||
error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
|
||||
&sa->agf_bp, &sa->agfl_bp);
|
||||
error = xchk_ag_read_headers(sc, agno, sa);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return xchk_ag_btcur_init(sc, sa);
|
||||
xchk_ag_btcur_init(sc, sa);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -596,8 +593,7 @@ xchk_trans_alloc(
|
|||
/* Set us up with a transaction and an empty context. */
|
||||
int
|
||||
xchk_setup_fs(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
uint resblks;
|
||||
|
||||
|
@ -609,7 +605,6 @@ xchk_setup_fs(
|
|||
int
|
||||
xchk_setup_ag_btree(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
bool force_log)
|
||||
{
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
|
@ -627,7 +622,7 @@ xchk_setup_ag_btree(
|
|||
return error;
|
||||
}
|
||||
|
||||
error = xchk_setup_fs(sc, ip);
|
||||
error = xchk_setup_fs(sc);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -655,11 +650,11 @@ xchk_checkpoint_log(
|
|||
*/
|
||||
int
|
||||
xchk_get_inode(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip_in)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xfs_imap imap;
|
||||
struct xfs_mount *mp = sc->mp;
|
||||
struct xfs_inode *ip_in = XFS_I(file_inode(sc->file));
|
||||
struct xfs_inode *ip = NULL;
|
||||
int error;
|
||||
|
||||
|
@ -720,12 +715,11 @@ xchk_get_inode(
|
|||
int
|
||||
xchk_setup_inode_contents(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip,
|
||||
unsigned int resblks)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xchk_get_inode(sc, ip);
|
||||
error = xchk_get_inode(sc);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -821,7 +815,7 @@ xchk_metadata_inode_forks(
|
|||
return 0;
|
||||
|
||||
/* Metadata inodes don't live on the rt device. */
|
||||
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
|
||||
if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
|
||||
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -72,66 +72,52 @@ bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
|
|||
struct xfs_btree_cur **curpp);
|
||||
|
||||
/* Setup functions */
|
||||
int xchk_setup_fs(struct xfs_scrub *sc, struct xfs_inode *ip);
|
||||
int xchk_setup_ag_allocbt(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_inode(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_inode_bmap(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_directory(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_xattr(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_symlink(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_parent(struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip);
|
||||
int xchk_setup_fs(struct xfs_scrub *sc);
|
||||
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
|
||||
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
|
||||
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
|
||||
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
|
||||
int xchk_setup_inode(struct xfs_scrub *sc);
|
||||
int xchk_setup_inode_bmap(struct xfs_scrub *sc);
|
||||
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
|
||||
int xchk_setup_directory(struct xfs_scrub *sc);
|
||||
int xchk_setup_xattr(struct xfs_scrub *sc);
|
||||
int xchk_setup_symlink(struct xfs_scrub *sc);
|
||||
int xchk_setup_parent(struct xfs_scrub *sc);
|
||||
#ifdef CONFIG_XFS_RT
|
||||
int xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip);
|
||||
int xchk_setup_rt(struct xfs_scrub *sc);
|
||||
#else
|
||||
static inline int
|
||||
xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip)
|
||||
xchk_setup_rt(struct xfs_scrub *sc)
|
||||
{
|
||||
return -ENOENT;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_XFS_QUOTA
|
||||
int xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip);
|
||||
int xchk_setup_quota(struct xfs_scrub *sc);
|
||||
#else
|
||||
static inline int
|
||||
xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip)
|
||||
xchk_setup_quota(struct xfs_scrub *sc)
|
||||
{
|
||||
return -ENOENT;
|
||||
}
|
||||
#endif
|
||||
int xchk_setup_fscounters(struct xfs_scrub *sc, struct xfs_inode *ip);
|
||||
int xchk_setup_fscounters(struct xfs_scrub *sc);
|
||||
|
||||
void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
|
||||
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
|
||||
struct xchk_ag *sa);
|
||||
void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
|
||||
int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
|
||||
struct xfs_buf **agi, struct xfs_buf **agf,
|
||||
struct xfs_buf **agfl);
|
||||
struct xchk_ag *sa);
|
||||
void xchk_ag_btcur_free(struct xchk_ag *sa);
|
||||
int xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
|
||||
void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
|
||||
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
|
||||
const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
|
||||
|
||||
int xchk_setup_ag_btree(struct xfs_scrub *sc, struct xfs_inode *ip,
|
||||
bool force_log);
|
||||
int xchk_get_inode(struct xfs_scrub *sc, struct xfs_inode *ip_in);
|
||||
int xchk_setup_inode_contents(struct xfs_scrub *sc, struct xfs_inode *ip,
|
||||
unsigned int resblks);
|
||||
int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
|
||||
int xchk_get_inode(struct xfs_scrub *sc);
|
||||
int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
|
||||
void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
|
||||
|
||||
/*
|
||||
|
|
|
@ -22,10 +22,9 @@
|
|||
/* Set us up to scrub directories. */
|
||||
int
|
||||
xchk_setup_directory(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xchk_setup_inode_contents(sc, ip, 0);
|
||||
return xchk_setup_inode_contents(sc, 0);
|
||||
}
|
||||
|
||||
/* Directories */
|
||||
|
@ -538,7 +537,7 @@ xchk_directory_leaf1_bestfree(
|
|||
* There should be as many bestfree slots as there are dir data
|
||||
* blocks that can fit under i_size.
|
||||
*/
|
||||
if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) {
|
||||
if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_disk_size)) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
|
||||
goto out;
|
||||
}
|
||||
|
@ -694,15 +693,6 @@ xchk_directory_blocks(
|
|||
/* Iterate all the data extents in the directory... */
|
||||
found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
|
||||
while (found && !(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
|
||||
/* Block directories only have a single block at offset 0. */
|
||||
if (is_block &&
|
||||
(got.br_startoff > 0 ||
|
||||
got.br_blockcount != args.geo->fsbcount)) {
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
|
||||
got.br_startoff);
|
||||
break;
|
||||
}
|
||||
|
||||
/* No more data blocks... */
|
||||
if (got.br_startoff >= leaf_lblk)
|
||||
break;
|
||||
|
@ -817,7 +807,7 @@ xchk_directory(
|
|||
return -ENOENT;
|
||||
|
||||
/* Plausible size? */
|
||||
if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) {
|
||||
if (sc->ip->i_disk_size < xfs_dir2_sf_hdr_size(0)) {
|
||||
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
|
||||
goto out;
|
||||
}
|
||||
|
@ -843,7 +833,7 @@ xchk_directory(
|
|||
* Userspace usually asks for a 32k buffer, so we will too.
|
||||
*/
|
||||
bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
|
||||
sc->ip->i_d.di_size);
|
||||
sc->ip->i_disk_size);
|
||||
|
||||
/*
|
||||
* Look up every name in this directory by hash.
|
||||
|
|
|
@ -116,8 +116,7 @@ xchk_fscount_warmup(
|
|||
|
||||
int
|
||||
xchk_setup_fscounters(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
struct xchk_fscounters *fsc;
|
||||
int error;
|
||||
|
|
|
@@ -133,7 +133,8 @@ xchk_update_health(
	if (!sc->sick_mask)
		return;

-	bad = (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT);
+	bad = (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
+				   XFS_SCRUB_OFLAG_XCORRUPT));
	switch (type_to_health_flag[sc->sm->sm_type].group) {
	case XHG_AG:
		pag = xfs_perag_get(sc->mp, sc->sm->sm_agno);

@ -29,10 +29,9 @@
|
|||
*/
|
||||
int
|
||||
xchk_setup_ag_iallocbt(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER);
|
||||
return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
|
||||
}
|
||||
|
||||
/* Inode btree scrubber. */
|
||||
|
@ -212,7 +211,6 @@ xchk_iallocbt_check_cluster(
|
|||
{
|
||||
struct xfs_imap imap;
|
||||
struct xfs_mount *mp = bs->cur->bc_mp;
|
||||
struct xfs_dinode *dip;
|
||||
struct xfs_buf *cluster_bp;
|
||||
unsigned int nr_inodes;
|
||||
xfs_agnumber_t agno = bs->cur->bc_ag.agno;
|
||||
|
@ -278,7 +276,7 @@ xchk_iallocbt_check_cluster(
|
|||
&XFS_RMAP_OINFO_INODES);
|
||||
|
||||
/* Grab the inode cluster buffer. */
|
||||
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp, 0);
|
||||
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
|
||||
if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
|
||||
return error;
|
||||
|
||||
|
|
|
@ -28,8 +28,7 @@
|
|||
*/
|
||||
int
|
||||
xchk_setup_inode(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
int error;
|
||||
|
||||
|
@ -37,7 +36,7 @@ xchk_setup_inode(
|
|||
* Try to get the inode. If the verifiers fail, we try again
|
||||
* in raw mode.
|
||||
*/
|
||||
error = xchk_get_inode(sc, ip);
|
||||
error = xchk_get_inode(sc);
|
||||
switch (error) {
|
||||
case 0:
|
||||
break;
|
||||
|
|
|
@ -20,10 +20,9 @@
|
|||
/* Set us up to scrub parents. */
|
||||
int
|
||||
xchk_setup_parent(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xchk_setup_inode_contents(sc, ip, 0);
|
||||
return xchk_setup_inode_contents(sc, 0);
|
||||
}
|
||||
|
||||
/* Parent pointers */
|
||||
|
@ -102,7 +101,7 @@ xchk_parent_count_parent_dentries(
|
|||
* scanned.
|
||||
*/
|
||||
bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
|
||||
parent->i_d.di_size);
|
||||
parent->i_disk_size);
|
||||
oldpos = 0;
|
||||
while (true) {
|
||||
error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
|
||||
|
|
|
@ -37,8 +37,7 @@ xchk_quota_to_dqtype(
|
|||
/* Set us up to scrub a quota. */
|
||||
int
|
||||
xchk_setup_quota(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
xfs_dqtype_t dqtype;
|
||||
int error;
|
||||
|
@ -53,7 +52,7 @@ xchk_setup_quota(
|
|||
mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
|
||||
if (!xfs_this_quota_on(sc->mp, dqtype))
|
||||
return -ENOENT;
|
||||
error = xchk_setup_fs(sc, ip);
|
||||
error = xchk_setup_fs(sc);
|
||||
if (error)
|
||||
return error;
|
||||
sc->ip = xfs_quota_inode(sc->mp, dqtype);
|
||||
|
@ -85,7 +84,7 @@ xchk_quota_item(
|
|||
int error = 0;
|
||||
|
||||
if (xchk_should_terminate(sc, &error))
|
||||
return error;
|
||||
return -ECANCELED;
|
||||
|
||||
/*
|
||||
* Except for the root dquot, the actual dquot we got must either have
|
||||
|
@@ -162,7 +161,7 @@ xchk_quota_item(

 out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
-		return -EFSCORRUPTED;
+		return -ECANCELED;

	return 0;
 }
@ -238,6 +237,8 @@ xchk_quota(
|
|||
error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
|
||||
sc->ilock_flags = XFS_ILOCK_EXCL;
|
||||
xfs_ilock(sc->ip, sc->ilock_flags);
|
||||
if (error == -ECANCELED)
|
||||
error = 0;
|
||||
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
|
||||
sqi.last_id * qi->qi_dqperchunk, &error))
|
||||
goto out;
|
||||
|
|
|
@ -19,10 +19,9 @@
|
|||
*/
|
||||
int
|
||||
xchk_setup_ag_refcountbt(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xchk_setup_ag_btree(sc, ip, false);
|
||||
return xchk_setup_ag_btree(sc, false);
|
||||
}
|
||||
|
||||
/* Reference count btree scrubber. */
|
||||
|
|
|
@ -37,19 +37,18 @@
|
|||
*/
|
||||
int
|
||||
xrep_attempt(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
int error = 0;
|
||||
|
||||
trace_xrep_attempt(ip, sc->sm, error);
|
||||
trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);
|
||||
|
||||
xchk_ag_btcur_free(&sc->sa);
|
||||
|
||||
/* Repair whatever's broken. */
|
||||
ASSERT(sc->ops->repair);
|
||||
error = sc->ops->repair(sc);
|
||||
trace_xrep_done(ip, sc->sm, error);
|
||||
trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
|
||||
switch (error) {
|
||||
case 0:
|
||||
/*
|
||||
|
@ -207,7 +206,11 @@ xrep_calc_ag_resblks(
|
|||
|
||||
/* Now grab the block counters from the AGF. */
|
||||
error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
|
||||
if (!error) {
|
||||
if (error) {
|
||||
aglen = xfs_ag_block_count(mp, sm->sm_agno);
|
||||
freelen = aglen;
|
||||
usedlen = aglen;
|
||||
} else {
|
||||
struct xfs_agf *agf = bp->b_addr;
|
||||
|
||||
aglen = be32_to_cpu(agf->agf_length);
|
||||
|
|
|
@@ -17,7 +17,7 @@ static inline int xrep_notsupported(struct xfs_scrub *sc)

 /* Repair helpers */

-int xrep_attempt(struct xfs_inode *ip, struct xfs_scrub *sc);
+int xrep_attempt(struct xfs_scrub *sc);
 void xrep_failure(struct xfs_mount *mp);
 int xrep_roll_ag_trans(struct xfs_scrub *sc);
 bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
@ -64,8 +64,8 @@ int xrep_agi(struct xfs_scrub *sc);
|
|||
|
||||
#else
|
||||
|
||||
static inline int xrep_attempt(
|
||||
struct xfs_inode *ip,
|
||||
static inline int
|
||||
xrep_attempt(
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
|
|
|
@ -21,10 +21,9 @@
|
|||
*/
|
||||
int
|
||||
xchk_setup_ag_rmapbt(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
return xchk_setup_ag_btree(sc, ip, false);
|
||||
return xchk_setup_ag_btree(sc, false);
|
||||
}
|
||||
|
||||
/* Reverse-mapping scrubber. */
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
/* Set us up with the realtime metadata locked. */
|
||||
int
|
||||
xchk_setup_rt(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = xchk_setup_fs(sc, ip);
|
||||
error = xchk_setup_fs(sc);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -100,7 +99,7 @@ xchk_rtbitmap(
|
|||
int error;
|
||||
|
||||
/* Is the size of the rtbitmap correct? */
|
||||
if (sc->mp->m_rbmip->i_d.di_size !=
|
||||
if (sc->mp->m_rbmip->i_disk_size !=
|
||||
XFS_FSB_TO_B(sc->mp, sc->mp->m_sb.sb_rbmblocks)) {
|
||||
xchk_ino_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
|
||||
return 0;
|
||||
|
|
|
@ -149,9 +149,10 @@ xchk_probe(
|
|||
STATIC int
|
||||
xchk_teardown(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip_in,
|
||||
int error)
|
||||
{
|
||||
struct xfs_inode *ip_in = XFS_I(file_inode(sc->file));
|
||||
|
||||
xchk_ag_free(sc, &sc->sa);
|
||||
if (sc->tp) {
|
||||
if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
|
||||
|
@ -168,7 +169,8 @@ xchk_teardown(
|
|||
xfs_irele(sc->ip);
|
||||
sc->ip = NULL;
|
||||
}
|
||||
sb_end_write(sc->mp->m_super);
|
||||
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
|
||||
mnt_drop_write_file(sc->file);
|
||||
if (sc->flags & XCHK_REAPING_DISABLED)
|
||||
xchk_start_reaping(sc);
|
||||
if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) {
|
||||
|
@ -456,23 +458,25 @@ static inline void xchk_postmortem(struct xfs_scrub *sc)
|
|||
/* Dispatch metadata scrubbing. */
|
||||
int
|
||||
xfs_scrub_metadata(
|
||||
struct xfs_inode *ip,
|
||||
struct file *file,
|
||||
struct xfs_scrub_metadata *sm)
|
||||
{
|
||||
struct xfs_scrub sc = {
|
||||
.mp = ip->i_mount,
|
||||
.file = file,
|
||||
.sm = sm,
|
||||
.sa = {
|
||||
.agno = NULLAGNUMBER,
|
||||
},
|
||||
};
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_mount *mp = XFS_I(file_inode(file))->i_mount;
|
||||
int error = 0;
|
||||
|
||||
sc.mp = mp;
|
||||
|
||||
BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
|
||||
(sizeof(struct xchk_meta_ops) * XFS_SCRUB_TYPE_NR));
|
||||
|
||||
trace_xchk_start(ip, sm, error);
|
||||
trace_xchk_start(XFS_I(file_inode(file)), sm, error);
|
||||
|
||||
/* Forbidden if we are shut down or mounted norecovery. */
|
||||
error = -ESHUTDOWN;
|
||||
|
@ -492,15 +496,17 @@ xfs_scrub_metadata(
|
|||
sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type);
|
||||
retry_op:
|
||||
/*
|
||||
* If freeze runs concurrently with a scrub, the freeze can be delayed
|
||||
* indefinitely as we walk the filesystem and iterate over metadata
|
||||
* buffers. Freeze quiesces the log (which waits for the buffer LRU to
|
||||
* be emptied) and that won't happen while checking is running.
|
||||
* When repairs are allowed, prevent freezing or readonly remount while
|
||||
* scrub is running with a real transaction.
|
||||
*/
|
||||
sb_start_write(mp->m_super);
|
||||
if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) {
|
||||
error = mnt_want_write_file(sc.file);
|
||||
if (error)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Set up for the operation. */
|
||||
error = sc.ops->setup(&sc, ip);
|
||||
error = sc.ops->setup(&sc);
|
||||
if (error)
|
||||
goto out_teardown;
|
||||
|
||||
|
@ -512,12 +518,12 @@ xfs_scrub_metadata(
|
|||
* Tear down everything we hold, then set up again with
|
||||
* preparation for worst-case scenarios.
|
||||
*/
|
||||
error = xchk_teardown(&sc, ip, 0);
|
||||
error = xchk_teardown(&sc, 0);
|
||||
if (error)
|
||||
goto out;
|
||||
sc.flags |= XCHK_TRY_HARDER;
|
||||
goto retry_op;
|
||||
} else if (error)
|
||||
} else if (error || (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE))
|
||||
goto out_teardown;
|
||||
|
||||
xchk_update_health(&sc);
|
||||
|
@ -546,14 +552,14 @@ xfs_scrub_metadata(
|
|||
* If it's broken, userspace wants us to fix it, and we haven't
|
||||
* already tried to fix it, then attempt a repair.
|
||||
*/
|
||||
error = xrep_attempt(ip, &sc);
|
||||
error = xrep_attempt(&sc);
|
||||
if (error == -EAGAIN) {
|
||||
/*
|
||||
* Either the repair function succeeded or it couldn't
|
||||
* get all the resources it needs; either way, we go
|
||||
* back to the beginning and call the scrub function.
|
||||
*/
|
||||
error = xchk_teardown(&sc, ip, 0);
|
||||
error = xchk_teardown(&sc, 0);
|
||||
if (error) {
|
||||
xrep_failure(mp);
|
||||
goto out;
|
||||
|
@ -565,9 +571,9 @@ xfs_scrub_metadata(
|
|||
out_nofix:
|
||||
xchk_postmortem(&sc);
|
||||
out_teardown:
|
||||
error = xchk_teardown(&sc, ip, error);
|
||||
error = xchk_teardown(&sc, error);
|
||||
out:
|
||||
trace_xchk_done(ip, sm, error);
|
||||
trace_xchk_done(XFS_I(file_inode(file)), sm, error);
|
||||
if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
|
||||
sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
|
||||
error = 0;
|
||||
|
|
|
@ -18,8 +18,7 @@ enum xchk_type {
|
|||
|
||||
struct xchk_meta_ops {
|
||||
/* Acquire whatever resources are needed for the operation. */
|
||||
int (*setup)(struct xfs_scrub *,
|
||||
struct xfs_inode *);
|
||||
int (*setup)(struct xfs_scrub *sc);
|
||||
|
||||
/* Examine metadata for errors. */
|
||||
int (*scrub)(struct xfs_scrub *);
|
||||
|
@ -59,7 +58,18 @@ struct xfs_scrub {
|
|||
struct xfs_scrub_metadata *sm;
|
||||
const struct xchk_meta_ops *ops;
|
||||
struct xfs_trans *tp;
|
||||
|
||||
/* File that scrub was called with. */
|
||||
struct file *file;
|
||||
|
||||
/*
|
||||
* File that is undergoing the scrub operation. This can differ from
|
||||
* the file that scrub was called with if we're checking file-based fs
|
||||
* metadata (e.g. rt bitmaps) or if we're doing a scrub-by-handle for
|
||||
* something that can't be opened directly (e.g. symlinks).
|
||||
*/
|
||||
struct xfs_inode *ip;
|
||||
|
||||
void *buf;
|
||||
uint ilock_flags;
|
||||
|
||||
|
|
|
@ -18,15 +18,14 @@
|
|||
/* Set us up to scrub a symbolic link. */
|
||||
int
|
||||
xchk_setup_symlink(
|
||||
struct xfs_scrub *sc,
|
||||
struct xfs_inode *ip)
|
||||
struct xfs_scrub *sc)
|
||||
{
|
||||
/* Allocate the buffer without the inode lock held. */
|
||||
sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL);
|
||||
if (!sc->buf)
|
||||
return -ENOMEM;
|
||||
|
||||
return xchk_setup_inode_contents(sc, ip, 0);
|
||||
return xchk_setup_inode_contents(sc, 0);
|
||||
}
|
||||
|
||||
/* Symbolic links. */
|
||||
|
@ -43,7 +42,7 @@ xchk_symlink(
|
|||
if (!S_ISLNK(VFS_I(ip)->i_mode))
|
||||
return -ENOENT;
|
||||
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
|
||||
len = ip->i_d.di_size;
|
||||
len = ip->i_disk_size;
|
||||
|
||||
/* Plausible size? */
|
||||
if (len > XFS_SYMLINK_MAXLEN || len <= 0) {
|
||||
|
@ -52,7 +51,7 @@ xchk_symlink(
|
|||
}
|
||||
|
||||
/* Inline symlink? */
|
||||
if (ifp->if_flags & XFS_IFINLINE) {
|
||||
if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
|
||||
if (len > XFS_IFORK_DSIZE(ip) ||
|
||||
len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip)))
|
||||
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
|
||||
|
|
|
@@ -7,9 +7,9 @@
 #define __XFS_SCRUB_H__

 #ifndef CONFIG_XFS_ONLINE_SCRUB
-# define xfs_scrub_metadata(ip, sm)	(-ENOTTY)
+# define xfs_scrub_metadata(file, sm)	(-ENOTTY)
 #else
-int xfs_scrub_metadata(struct xfs_inode *ip, struct xfs_scrub_metadata *sm);
+int xfs_scrub_metadata(struct file *file, struct xfs_scrub_metadata *sm);
 #endif /* CONFIG_XFS_ONLINE_SCRUB */

 #endif /* __XFS_SCRUB_H__ */

@ -36,47 +36,26 @@ XFS_WPC(struct iomap_writepage_ctx *ctx)
|
|||
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
|
||||
{
|
||||
return ioend->io_offset + ioend->io_size >
|
||||
XFS_I(ioend->io_inode)->i_d.di_size;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_setfilesize_trans_alloc(
|
||||
struct iomap_ioend *ioend)
|
||||
{
|
||||
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
|
||||
struct xfs_trans *tp;
|
||||
int error;
|
||||
|
||||
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
ioend->io_private = tp;
|
||||
|
||||
/*
|
||||
* We may pass freeze protection with a transaction. So tell lockdep
|
||||
* we released it.
|
||||
*/
|
||||
__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
|
||||
/*
|
||||
* We hand off the transaction to the completion thread now, so
|
||||
* clear the flag here.
|
||||
*/
|
||||
xfs_trans_clear_context(tp);
|
||||
return 0;
|
||||
XFS_I(ioend->io_inode)->i_disk_size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update on-disk file size now that data has been written to disk.
|
||||
*/
|
||||
STATIC int
|
||||
__xfs_setfilesize(
|
||||
int
|
||||
xfs_setfilesize(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_trans *tp,
|
||||
xfs_off_t offset,
|
||||
size_t size)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_trans *tp;
|
||||
xfs_fsize_t isize;
|
||||
int error;
|
||||
|
||||
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
isize = xfs_new_eof(ip, offset + size);
|
||||
|
@ -88,55 +67,13 @@ __xfs_setfilesize(
|
|||
|
||||
trace_xfs_setfilesize(ip, offset, size);
|
||||
|
||||
ip->i_d.di_size = isize;
|
||||
ip->i_disk_size = isize;
|
||||
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
return xfs_trans_commit(tp);
|
||||
}
|
||||
|
||||
int
|
||||
xfs_setfilesize(
|
||||
struct xfs_inode *ip,
|
||||
xfs_off_t offset,
|
||||
size_t size)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_trans *tp;
|
||||
int error;
|
||||
|
||||
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return __xfs_setfilesize(ip, tp, offset, size);
|
||||
}
|
||||
|
||||
STATIC int
|
||||
xfs_setfilesize_ioend(
|
||||
struct iomap_ioend *ioend,
|
||||
int error)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(ioend->io_inode);
|
||||
struct xfs_trans *tp = ioend->io_private;
|
||||
|
||||
/*
|
||||
* The transaction may have been allocated in the I/O submission thread,
|
||||
* thus we need to mark ourselves as being in a transaction manually.
|
||||
* Similarly for freeze protection.
|
||||
*/
|
||||
xfs_trans_set_context(tp);
|
||||
__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
|
||||
|
||||
/* we abort the update if there was an IO error */
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp);
|
||||
return error;
|
||||
}
|
||||
|
||||
return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
|
||||
}
|
||||
|
||||
/*
|
||||
* IO write completion.
|
||||
*/
|
||||
|
@@ -158,7 +95,7 @@ xfs_end_ioend(
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory strutures if the fs has been shut down.
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
@@ -182,35 +119,14 @@ xfs_end_ioend(
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

	if (!error && xfs_ioend_is_append(ioend))
		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * If the to be merged ioend has a preallocated transaction for file
 * size updates we need to ensure the ioend it is merged into also
 * has one. If it already has one we can simply cancel the transaction
 * as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend *ioend,
	struct iomap_ioend *next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Finish all pending io completions. */
void
xfs_end_io(
@@ -230,18 +146,11 @@ xfs_end_io(
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		iomap_ioend_try_merge(ioend, &tmp, NULL);
		xfs_end_ioend(ioend);
	}
}

static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

STATIC void
xfs_end_bio(
	struct bio *bio)
@@ -250,8 +159,6 @@ xfs_end_bio(
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	unsigned long flags;

	ASSERT(xfs_ioend_needs_workqueue(ioend));

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
@@ -384,8 +291,7 @@ xfs_map_blocks(
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(!xfs_need_iread_extents(&ip->i_df));

	/*
	 * Check if this is offset is covered by a COW extents, and if yes use
@@ -501,17 +407,11 @@ xfs_prepare_ioend(
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
	/* send ioends that might require a transaction to the completion wq */
	if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
	    (ioend->io_flags & IOMAP_F_SHARED))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}
@@ -514,7 +514,7 @@ xfs_attr_list_ilocked(
		return 0;
	if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_list(context);
	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
	if (xfs_attr_is_leaf(dp))
		return xfs_attr_leaf_list(context);
	return xfs_attr_node_list(context);
}
@@ -154,7 +154,7 @@ xfs_bmap_rtalloc(
	ap->blkno *= mp->m_sb.sb_rextsize;
	ralen *= mp->m_sb.sb_rextsize;
	ap->length = ralen;
	ap->ip->i_d.di_nblocks += ralen;
	ap->ip->i_nblocks += ralen;
	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
	if (ap->wasdel)
		ap->ip->i_delayed_blks -= ralen;
@@ -225,11 +225,9 @@ xfs_bmap_count_blocks(

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
@@ -423,7 +421,7 @@ xfs_getbmap(
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;
@@ -439,7 +437,7 @@ xfs_getbmap(
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		    (ip->i_diflags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
@@ -471,11 +469,9 @@ xfs_getbmap(
	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}
	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		goto out_unlock_ilock;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
@@ -558,7 +554,7 @@ xfs_bmap_punch_delalloc_range(
	struct xfs_iext_cursor icur;
	int error = 0;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(!xfs_need_iread_extents(ifp));

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
@@ -597,8 +593,24 @@ xfs_bmap_punch_delalloc_range(
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
xfs_can_free_eofblocks(
	struct xfs_inode *ip,
	bool force)
{
	struct xfs_bmbt_irec imap;
	struct xfs_mount *mp = ip->i_mount;
	xfs_fileoff_t end_fsb;
	xfs_fileoff_t last_fsb;
	int nimaps = 1;
	int error;

	/*
	 * Caller must either hold the exclusive io lock; or be inactivating
	 * the inode, which guarantees there are no other users of the inode.
	 */
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
	       (VFS_I(ip)->i_state & I_FREEING));

	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;
@@ -613,18 +625,43 @@ xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
	if (xfs_need_iread_extents(&ip->i_df))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
	/*
	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
	 * range supported by the page cache, because the truncation will loop
	 * forever.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return false;

	/*
	 * Look up the mapping for the first block past EOF. If we can't find
	 * it, there's nothing to free.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
			0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error || nimaps == 0)
		return false;

	/*
	 * If there's a real mapping there or there are delayed allocation
	 * reservations, then we have post-EOF blocks to try to free.
	 */
	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}

/*
@@ -637,78 +674,52 @@ xfs_free_eofblocks(
	struct xfs_inode *ip)
{
	struct xfs_trans *tp;
	int error;
	xfs_fileoff_t end_fsb;
	xfs_fileoff_t last_fsb;
	xfs_filblks_t map_len;
	int nimaps;
	struct xfs_bmbt_irec imap;
	struct xfs_mount *mp = ip->i_mount;
	int error;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file. If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;
	/* Attach the dquots to the inode up front. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	/* Wait on dio to ensure i_size has settled. */
	inode_dio_wait(VFS_I(ip));

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size. If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Do not update the on-disk file size. If we update the on-disk file
	 * size and then the system crashes before the contents of the file are
	 * flushed to disk then the files may be full of holes (ie NULL files
	 * bug).
	 */
	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
			XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	xfs_inode_clear_eofblocks_tag(ip);
	goto out_unlock;

err_cancel:
	/*
	 * If we get an error at this point we simply don't
	 * bother truncating the file.
	 */
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
@ -1208,7 +1219,7 @@ xfs_swap_extents_check_format(
|
|||
if (XFS_IS_QUOTA_ON(ip->i_mount) &&
|
||||
(!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
|
||||
!gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
|
||||
ip->i_d.di_projid != tip->i_d.di_projid))
|
||||
ip->i_projid != tip->i_projid))
|
||||
return -EINVAL;
|
||||
|
||||
/* Should never get a local format */
|
||||
|
@ -1323,9 +1334,9 @@ xfs_swap_extent_rmap(
|
|||
* rmap functions when we go to fix up the rmaps. The flags
|
||||
* will be switch for reals later.
|
||||
*/
|
||||
tip_flags2 = tip->i_d.di_flags2;
|
||||
if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
|
||||
tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
|
||||
tip_flags2 = tip->i_diflags2;
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
|
||||
tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
|
||||
|
||||
offset_fsb = 0;
|
||||
end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
|
||||
|
@ -1412,12 +1423,12 @@ xfs_swap_extent_rmap(
|
|||
offset_fsb += ilen;
|
||||
}
|
||||
|
||||
tip->i_d.di_flags2 = tip_flags2;
|
||||
tip->i_diflags2 = tip_flags2;
|
||||
return 0;
|
||||
|
||||
out:
|
||||
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
|
||||
tip->i_d.di_flags2 = tip_flags2;
|
||||
tip->i_diflags2 = tip_flags2;
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -1476,9 +1487,9 @@ xfs_swap_extent_forks(
|
|||
/*
|
||||
* Fix the on-disk inode values
|
||||
*/
|
||||
tmp = (uint64_t)ip->i_d.di_nblocks;
|
||||
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
|
||||
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
|
||||
tmp = (uint64_t)ip->i_nblocks;
|
||||
ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
|
||||
tip->i_nblocks = tmp + taforkblks - aforkblks;
|
||||
|
||||
/*
|
||||
* The extents in the source inode could still contain speculative
|
||||
|
@ -1663,8 +1674,8 @@ xfs_swap_extents(
|
|||
|
||||
/* Verify all data are being swapped */
|
||||
if (sxp->sx_offset != 0 ||
|
||||
sxp->sx_length != ip->i_d.di_size ||
|
||||
sxp->sx_length != tip->i_d.di_size) {
|
||||
sxp->sx_length != ip->i_disk_size ||
|
||||
sxp->sx_length != tip->i_disk_size) {
|
||||
error = -EFAULT;
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
@ -1715,13 +1726,13 @@ xfs_swap_extents(
|
|||
goto out_trans_cancel;
|
||||
|
||||
/* Do we have to swap reflink flags? */
|
||||
if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
|
||||
(tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
|
||||
f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
|
||||
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
|
||||
tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
|
||||
if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
|
||||
(tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
|
||||
f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
|
||||
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
|
||||
tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
|
||||
}
|
||||
|
||||
/* Swap the cow forks. */
@ -55,6 +55,24 @@ xfs_buf_log_format_size(
|
|||
(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
|
||||
}
|
||||
|
||||
static inline bool
|
||||
xfs_buf_item_straddle(
|
||||
struct xfs_buf *bp,
|
||||
uint offset,
|
||||
int first_bit,
|
||||
int nbits)
|
||||
{
|
||||
void *first, *last;
|
||||
|
||||
first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
|
||||
last = xfs_buf_offset(bp,
|
||||
offset + ((first_bit + nbits) << XFS_BLF_SHIFT));
|
||||
|
||||
if (last - first != nbits * XFS_BLF_CHUNK)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* This returns the number of log iovecs needed to log the
|
||||
* given buf log item.
|
||||
|
@ -69,24 +87,56 @@ STATIC void
|
|||
xfs_buf_item_size_segment(
|
||||
struct xfs_buf_log_item *bip,
|
||||
struct xfs_buf_log_format *blfp,
|
||||
uint offset,
|
||||
int *nvecs,
|
||||
int *nbytes)
|
||||
{
|
||||
struct xfs_buf *bp = bip->bli_buf;
|
||||
int first_bit;
|
||||
int nbits;
|
||||
int next_bit;
|
||||
int last_bit;
|
||||
|
||||
last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
|
||||
if (last_bit == -1)
|
||||
first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
|
||||
if (first_bit == -1)
|
||||
return;
|
||||
|
||||
/*
|
||||
* initial count for a dirty buffer is 2 vectors - the format structure
|
||||
* and the first dirty region.
|
||||
*/
|
||||
*nvecs += 2;
|
||||
*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;
|
||||
(*nvecs)++;
|
||||
*nbytes += xfs_buf_log_format_size(blfp);
|
||||
|
||||
do {
|
||||
nbits = xfs_contig_bits(blfp->blf_data_map,
|
||||
blfp->blf_map_size, first_bit);
|
||||
ASSERT(nbits > 0);
|
||||
|
||||
/*
|
||||
* Straddling a page is rare because we don't log contiguous
|
||||
* chunks of unmapped buffers anywhere.
|
||||
*/
|
||||
if (nbits > 1 &&
|
||||
xfs_buf_item_straddle(bp, offset, first_bit, nbits))
|
||||
goto slow_scan;
|
||||
|
||||
(*nvecs)++;
|
||||
*nbytes += nbits * XFS_BLF_CHUNK;
|
||||
|
||||
/*
|
||||
* This takes the bit number to start looking from and
|
||||
* returns the next set bit from there. It returns -1
|
||||
* if there are no more bits set or the start bit is
|
||||
* beyond the end of the bitmap.
|
||||
*/
|
||||
first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
|
||||
(uint)first_bit + nbits + 1);
|
||||
} while (first_bit != -1);
|
||||
|
||||
return;
|
||||
|
||||
slow_scan:
|
||||
/* Count the first bit we jumped out of the above loop from */
|
||||
(*nvecs)++;
|
||||
*nbytes += XFS_BLF_CHUNK;
|
||||
last_bit = first_bit;
|
||||
while (last_bit != -1) {
|
||||
/*
|
||||
* This takes the bit number to start looking from and
|
||||
|
@ -103,16 +153,15 @@ xfs_buf_item_size_segment(
|
|||
*/
|
||||
if (next_bit == -1) {
|
||||
break;
|
||||
} else if (next_bit != last_bit + 1) {
|
||||
last_bit = next_bit;
|
||||
(*nvecs)++;
|
||||
} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
|
||||
(xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
|
||||
XFS_BLF_CHUNK)) {
|
||||
} else if (next_bit != last_bit + 1 ||
|
||||
xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
|
||||
last_bit = next_bit;
|
||||
first_bit = next_bit;
|
||||
(*nvecs)++;
|
||||
nbits = 1;
|
||||
} else {
|
||||
last_bit++;
|
||||
nbits++;
|
||||
}
|
||||
*nbytes += XFS_BLF_CHUNK;
|
||||
}
|
||||
|
@ -142,7 +191,10 @@ xfs_buf_item_size(
|
|||
int *nbytes)
|
||||
{
|
||||
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
|
||||
struct xfs_buf *bp = bip->bli_buf;
|
||||
int i;
|
||||
int bytes;
|
||||
uint offset = 0;
|
||||
|
||||
ASSERT(atomic_read(&bip->bli_refcount) > 0);
|
||||
if (bip->bli_flags & XFS_BLI_STALE) {
|
||||
|
@ -174,7 +226,7 @@ xfs_buf_item_size(
|
|||
}
|
||||
|
||||
/*
|
||||
* the vector count is based on the number of buffer vectors we have
|
||||
* The vector count is based on the number of buffer vectors we have
|
||||
* dirty bits in. This will only be greater than one when we have a
|
||||
* compound buffer with more than one segment dirty. Hence for compound
|
||||
* buffers we need to track which segment the dirty bits correspond to,
|
||||
|
@ -182,10 +234,19 @@ xfs_buf_item_size(
|
|||
* count for the extra buf log format structure that will need to be
|
||||
* written.
|
||||
*/
|
||||
bytes = 0;
|
||||
for (i = 0; i < bip->bli_format_count; i++) {
|
||||
xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
|
||||
nvecs, nbytes);
|
||||
xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
|
||||
nvecs, &bytes);
|
||||
offset += BBTOB(bp->b_maps[i].bm_len);
|
||||
}
|
||||
|
||||
/*
|
||||
* Round up the buffer size required to minimise the number of memory
|
||||
* allocations that need to be done as this item grows when relogged by
|
||||
* repeated modifications.
|
||||
*/
|
||||
*nbytes = round_up(bytes, 512);
|
||||
trace_xfs_buf_item_size(bip);
|
||||
}
|
||||
|
||||
|
@ -204,18 +265,6 @@ xfs_buf_item_copy_iovec(
|
|||
nbits * XFS_BLF_CHUNK);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
xfs_buf_item_straddle(
|
||||
struct xfs_buf *bp,
|
||||
uint offset,
|
||||
int next_bit,
|
||||
int last_bit)
|
||||
{
|
||||
return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
|
||||
(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
|
||||
XFS_BLF_CHUNK);
|
||||
}
|
||||
|
||||
static void
|
||||
xfs_buf_item_format_segment(
|
||||
struct xfs_buf_log_item *bip,
|
||||
|
@ -268,6 +317,38 @@ xfs_buf_item_format_segment(
|
|||
/*
|
||||
* Fill in an iovec for each set of contiguous chunks.
|
||||
*/
|
||||
do {
|
||||
ASSERT(first_bit >= 0);
|
||||
nbits = xfs_contig_bits(blfp->blf_data_map,
|
||||
blfp->blf_map_size, first_bit);
|
||||
ASSERT(nbits > 0);
|
||||
|
||||
/*
|
||||
* Straddling a page is rare because we don't log contiguous
|
||||
* chunks of unmapped buffers anywhere.
|
||||
*/
|
||||
if (nbits > 1 &&
|
||||
xfs_buf_item_straddle(bp, offset, first_bit, nbits))
|
||||
goto slow_scan;
|
||||
|
||||
xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
|
||||
first_bit, nbits);
|
||||
blfp->blf_size++;
|
||||
|
||||
/*
|
||||
* This takes the bit number to start looking from and
|
||||
* returns the next set bit from there. It returns -1
|
||||
* if there are no more bits set or the start bit is
|
||||
* beyond the end of the bitmap.
|
||||
*/
|
||||
first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
|
||||
(uint)first_bit + nbits + 1);
|
||||
} while (first_bit != -1);
|
||||
|
||||
return;
|
||||
|
||||
slow_scan:
|
||||
ASSERT(bp->b_addr == NULL);
|
||||
last_bit = first_bit;
|
||||
nbits = 1;
|
||||
for (;;) {
|
||||
|
@ -292,7 +373,7 @@ xfs_buf_item_format_segment(
|
|||
blfp->blf_size++;
|
||||
break;
|
||||
} else if (next_bit != last_bit + 1 ||
|
||||
xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
|
||||
xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
|
||||
xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
|
||||
first_bit, nbits);
|
||||
blfp->blf_size++;
|
||||
|
|
|
@ -57,8 +57,8 @@ xfs_dir2_sf_getdents(
|
|||
xfs_ino_t ino;
|
||||
struct xfs_da_geometry *geo = args->geo;
|
||||
|
||||
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
|
||||
ASSERT(dp->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
ASSERT(dp->i_df.if_bytes == dp->i_disk_size);
|
||||
ASSERT(dp->i_df.if_u1.if_data != NULL);
|
||||
|
||||
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
|
||||
|
@ -258,11 +258,9 @@ xfs_dir2_leaf_readbuf(
|
|||
int ra_want;
|
||||
int error = 0;
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(args->trans, dp, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out;
|
||||
}
|
||||
error = xfs_iread_extents(args->trans, dp, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Look for mapped directory blocks at or above the current offset.
|
||||
|
|
|
@ -748,11 +748,9 @@ xfs_dq_get_next_id(
|
|||
start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
|
||||
|
||||
lock_flags = xfs_ilock_data_map_shared(quotip);
|
||||
if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
|
||||
/* contiguous chunk, bump startoff for the id calculation */
|
||||
|
@ -953,7 +951,7 @@ xfs_qm_id_for_quotatype(
|
|||
case XFS_DQTYPE_GROUP:
|
||||
return i_gid_read(VFS_I(ip));
|
||||
case XFS_DQTYPE_PROJ:
|
||||
return ip->i_d.di_projid;
|
||||
return ip->i_projid;
|
||||
}
|
||||
ASSERT(0);
|
||||
return 0;
|
||||
|
|
|
@ -56,6 +56,7 @@ static unsigned int xfs_errortag_random_default[] = {
|
|||
XFS_RANDOM_BUF_IOERROR,
|
||||
XFS_RANDOM_REDUCE_MAX_IEXTENTS,
|
||||
XFS_RANDOM_BMAP_ALLOC_MINLEN_EXTENT,
|
||||
XFS_RANDOM_AG_RESV_FAIL,
|
||||
};
|
||||
|
||||
struct xfs_errortag_attr {
|
||||
|
@ -168,6 +169,7 @@ XFS_ERRORTAG_ATTR_RW(iunlink_fallback, XFS_ERRTAG_IUNLINK_FALLBACK);
|
|||
XFS_ERRORTAG_ATTR_RW(buf_ioerror, XFS_ERRTAG_BUF_IOERROR);
|
||||
XFS_ERRORTAG_ATTR_RW(reduce_max_iextents, XFS_ERRTAG_REDUCE_MAX_IEXTENTS);
|
||||
XFS_ERRORTAG_ATTR_RW(bmap_alloc_minlen_extent, XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT);
|
||||
XFS_ERRORTAG_ATTR_RW(ag_resv_fail, XFS_ERRTAG_AG_RESV_FAIL);
|
||||
|
||||
static struct attribute *xfs_errortag_attrs[] = {
|
||||
XFS_ERRORTAG_ATTR_LIST(noerror),
|
||||
|
@ -208,6 +210,7 @@ static struct attribute *xfs_errortag_attrs[] = {
|
|||
XFS_ERRORTAG_ATTR_LIST(buf_ioerror),
|
||||
XFS_ERRORTAG_ATTR_LIST(reduce_max_iextents),
|
||||
XFS_ERRORTAG_ATTR_LIST(bmap_alloc_minlen_extent),
|
||||
XFS_ERRORTAG_ATTR_LIST(ag_resv_fail),
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -299,6 +302,8 @@ xfs_errortag_add(
|
|||
struct xfs_mount *mp,
|
||||
unsigned int error_tag)
|
||||
{
|
||||
BUILD_BUG_ON(ARRAY_SIZE(xfs_errortag_random_default) != XFS_ERRTAG_MAX);
|
||||
|
||||
if (error_tag >= XFS_ERRTAG_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
@ -90,9 +90,9 @@ xfs_update_prealloc_flags(
|
|||
}
|
||||
|
||||
if (flags & XFS_PREALLOC_SET)
|
||||
ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
|
||||
ip->i_diflags |= XFS_DIFLAG_PREALLOC;
|
||||
if (flags & XFS_PREALLOC_CLEAR)
|
||||
ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
|
||||
ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;
|
||||
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
if (flags & XFS_PREALLOC_SYNC)
|
||||
|
@ -1159,10 +1159,10 @@ xfs_file_remap_range(
|
|||
*/
|
||||
cowextsize = 0;
|
||||
if (pos_in == 0 && len == i_size_read(inode_in) &&
|
||||
(src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
|
||||
(src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
|
||||
pos_out == 0 && len >= i_size_read(inode_out) &&
|
||||
!(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
|
||||
cowextsize = src->i_d.di_cowextsize;
|
||||
!(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
|
||||
cowextsize = src->i_cowextsize;
|
||||
|
||||
ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
|
||||
remap_flags);
|
||||
|
@ -1244,7 +1244,7 @@ xfs_file_readdir(
|
|||
* point we can change the ->readdir prototype to include the
|
||||
* buffer size. For now we use the current glibc buffer size.
|
||||
*/
|
||||
bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);
|
||||
bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);
|
||||
|
||||
return xfs_readdir(NULL, ip, ctx, bufsize);
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ xfs_inode_is_filestream(
|
|||
struct xfs_inode *ip)
|
||||
{
|
||||
return (ip->i_mount->m_flags & XFS_MOUNT_FILESTREAMS) ||
|
||||
(ip->i_d.di_flags & XFS_DIFLAG_FILESTREAM);
|
||||
(ip->i_diflags & XFS_DIFLAG_FILESTREAM);
|
||||
}
|
||||
|
||||
#endif /* __XFS_FILESTREAM_H__ */
|
||||
|
|
|
@ -904,14 +904,6 @@ xfs_getfsmap(
|
|||
info.fsmap_recs = fsmap_recs;
|
||||
info.head = head;
|
||||
|
||||
/*
|
||||
* If fsmap runs concurrently with a scrub, the freeze can be delayed
|
||||
* indefinitely as we walk the rmapbt and iterate over metadata
|
||||
* buffers. Freeze quiesces the log (which waits for the buffer LRU to
|
||||
* be emptied) and that won't happen while we're reading buffers.
|
||||
*/
|
||||
sb_start_write(mp->m_super);
|
||||
|
||||
/* For each device we support... */
|
||||
for (i = 0; i < XFS_GETFSMAP_DEVS; i++) {
|
||||
/* Is this device within the range the user asked for? */
|
||||
|
@ -934,6 +926,11 @@ xfs_getfsmap(
|
|||
if (handlers[i].dev > head->fmh_keys[0].fmr_device)
|
||||
memset(&dkeys[0], 0, sizeof(struct xfs_fsmap));
|
||||
|
||||
/*
|
||||
* Grab an empty transaction so that we can use its recursive
|
||||
* buffer locking abilities to detect cycles in the rmapbt
|
||||
* without deadlocking.
|
||||
*/
|
||||
error = xfs_trans_alloc_empty(mp, &tp);
|
||||
if (error)
|
||||
break;
|
||||
|
@ -951,7 +948,6 @@ xfs_getfsmap(
|
|||
|
||||
if (tp)
|
||||
xfs_trans_cancel(tp);
|
||||
sb_end_write(mp->m_super);
|
||||
head->fmh_oflags = FMH_OF_DEV_T;
|
||||
return error;
|
||||
}
|
||||
|
|
|
@ -20,6 +20,64 @@
|
|||
#include "xfs_ag.h"
|
||||
#include "xfs_ag_resv.h"
|
||||
|
||||
/*
|
||||
* Write new AG headers to disk. Non-transactional, but need to be
|
||||
* written and completed prior to the growfs transaction being logged.
|
||||
* To do this, we use a delayed write buffer list and wait for
|
||||
* submission and IO completion of the list as a whole. This allows the
|
||||
* IO subsystem to merge all the AG headers in a single AG into a single
|
||||
* IO and hide most of the latency of the IO from us.
|
||||
*
|
||||
* This also means that if we get an error whilst building the buffer
|
||||
* list to write, we can cancel the entire list without having written
|
||||
* anything.
|
||||
*/
|
||||
static int
|
||||
xfs_resizefs_init_new_ags(
|
||||
struct xfs_trans *tp,
|
||||
struct aghdr_init_data *id,
|
||||
xfs_agnumber_t oagcount,
|
||||
xfs_agnumber_t nagcount,
|
||||
xfs_rfsblock_t delta,
|
||||
bool *lastag_extended)
|
||||
{
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
xfs_rfsblock_t nb = mp->m_sb.sb_dblocks + delta;
|
||||
int error;
|
||||
|
||||
*lastag_extended = false;
|
||||
|
||||
INIT_LIST_HEAD(&id->buffer_list);
|
||||
for (id->agno = nagcount - 1;
|
||||
id->agno >= oagcount;
|
||||
id->agno--, delta -= id->agsize) {
|
||||
|
||||
if (id->agno == nagcount - 1)
|
||||
id->agsize = nb - (id->agno *
|
||||
(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
|
||||
else
|
||||
id->agsize = mp->m_sb.sb_agblocks;
|
||||
|
||||
error = xfs_ag_init_headers(mp, id);
|
||||
if (error) {
|
||||
xfs_buf_delwri_cancel(&id->buffer_list);
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
error = xfs_buf_delwri_submit(&id->buffer_list);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_trans_agblocks_delta(tp, id->nfree);
|
||||
|
||||
if (delta) {
|
||||
*lastag_extended = true;
|
||||
error = xfs_ag_extend_space(mp, tp, id, delta);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* growfs operations
|
||||
*/
|
||||
|
@ -33,22 +91,25 @@ xfs_growfs_data_private(
|
|||
xfs_agnumber_t nagcount;
|
||||
xfs_agnumber_t nagimax = 0;
|
||||
xfs_rfsblock_t nb, nb_div, nb_mod;
|
||||
xfs_rfsblock_t delta;
|
||||
int64_t delta;
|
||||
bool lastag_extended;
|
||||
xfs_agnumber_t oagcount;
|
||||
struct xfs_trans *tp;
|
||||
struct aghdr_init_data id = {};
|
||||
|
||||
nb = in->newblocks;
|
||||
if (nb < mp->m_sb.sb_dblocks)
|
||||
return -EINVAL;
|
||||
if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
|
||||
return error;
|
||||
error = xfs_buf_read_uncached(mp->m_ddev_targp,
|
||||
XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
|
||||
XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
|
||||
error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
|
||||
if (error)
|
||||
return error;
|
||||
xfs_buf_relse(bp);
|
||||
|
||||
if (nb > mp->m_sb.sb_dblocks) {
|
||||
error = xfs_buf_read_uncached(mp->m_ddev_targp,
|
||||
XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
|
||||
XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
xfs_buf_relse(bp);
|
||||
}
|
||||
|
||||
nb_div = nb;
|
||||
nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
|
||||
|
@ -56,10 +117,16 @@ xfs_growfs_data_private(
|
|||
if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
|
||||
nagcount--;
|
||||
nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
|
||||
if (nb < mp->m_sb.sb_dblocks)
|
||||
return -EINVAL;
|
||||
}
|
||||
delta = nb - mp->m_sb.sb_dblocks;
|
||||
/*
|
||||
* Reject filesystems with a single AG because they are not
|
||||
* supported, and reject a shrink operation that would cause a
|
||||
* filesystem to become unsupported.
|
||||
*/
|
||||
if (delta < 0 && nagcount < 2)
|
||||
return -EINVAL;
|
||||
|
||||
oagcount = mp->m_sb.sb_agcount;
|
||||
|
||||
/* allocate the new per-ag structures */
|
||||
|
@ -67,55 +134,34 @@ xfs_growfs_data_private(
|
|||
error = xfs_initialize_perag(mp, nagcount, &nagimax);
|
||||
if (error)
|
||||
return error;
|
||||
} else if (nagcount < oagcount) {
|
||||
/* TODO: shrinking the entire AGs hasn't yet completed */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
|
||||
XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
|
||||
(delta > 0 ? XFS_GROWFS_SPACE_RES(mp) : -delta), 0,
|
||||
XFS_TRANS_RESERVE, &tp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
/*
|
||||
* Write new AG headers to disk. Non-transactional, but need to be
|
||||
* written and completed prior to the growfs transaction being logged.
|
||||
* To do this, we use a delayed write buffer list and wait for
|
||||
* submission and IO completion of the list as a whole. This allows the
|
||||
* IO subsystem to merge all the AG headers in a single AG into a single
|
||||
* IO and hide most of the latency of the IO from us.
|
||||
*
|
||||
* This also means that if we get an error whilst building the buffer
|
||||
* list to write, we can cancel the entire list without having written
|
||||
* anything.
|
||||
*/
|
||||
INIT_LIST_HEAD(&id.buffer_list);
|
||||
for (id.agno = nagcount - 1;
|
||||
id.agno >= oagcount;
|
||||
id.agno--, delta -= id.agsize) {
|
||||
if (delta > 0) {
|
||||
error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
|
||||
delta, &lastag_extended);
|
||||
} else {
|
||||
static struct ratelimit_state shrink_warning = \
|
||||
RATELIMIT_STATE_INIT("shrink_warning", 86400 * HZ, 1);
|
||||
ratelimit_set_flags(&shrink_warning, RATELIMIT_MSG_ON_RELEASE);
|
||||
|
||||
if (id.agno == nagcount - 1)
|
||||
id.agsize = nb -
|
||||
(id.agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
|
||||
else
|
||||
id.agsize = mp->m_sb.sb_agblocks;
|
||||
if (__ratelimit(&shrink_warning))
|
||||
xfs_alert(mp,
|
||||
"EXPERIMENTAL online shrink feature in use. Use at your own risk!");
|
||||
|
||||
error = xfs_ag_init_headers(mp, &id);
|
||||
if (error) {
|
||||
xfs_buf_delwri_cancel(&id.buffer_list);
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
|
||||
}
|
||||
error = xfs_buf_delwri_submit(&id.buffer_list);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
xfs_trans_agblocks_delta(tp, id.nfree);
|
||||
|
||||
/* If there are new blocks in the old last AG, extend it. */
|
||||
if (delta) {
|
||||
error = xfs_ag_extend_space(mp, tp, &id, delta);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update changed superblock fields transactionally. These are not
|
||||
* seen by the rest of the world until the transaction commit applies
|
||||
|
@ -123,11 +169,19 @@ xfs_growfs_data_private(
|
|||
*/
|
||||
if (nagcount > oagcount)
|
||||
xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
|
||||
if (nb > mp->m_sb.sb_dblocks)
|
||||
xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
|
||||
nb - mp->m_sb.sb_dblocks);
|
||||
if (delta)
|
||||
xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
|
||||
if (id.nfree)
|
||||
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);
|
||||
|
||||
/*
|
||||
* Sync sb counters now to reflect the updated values. This is
|
||||
* particularly important for shrink because the write verifier
|
||||
* will fail if sb_fdblocks is ever larger than sb_dblocks.
|
||||
*/
|
||||
if (xfs_sb_version_haslazysbcount(&mp->m_sb))
|
||||
xfs_log_sb(tp);
|
||||
|
||||
xfs_trans_set_sync(tp);
|
||||
error = xfs_trans_commit(tp);
|
||||
if (error)
|
||||
|
@ -139,28 +193,29 @@ xfs_growfs_data_private(
|
|||
xfs_set_low_space_thresholds(mp);
|
||||
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
|
||||
|
||||
/*
|
||||
* If we expanded the last AG, free the per-AG reservation
|
||||
* so we can reinitialize it with the new size.
|
||||
*/
|
||||
if (delta) {
|
||||
struct xfs_perag *pag;
|
||||
if (delta > 0) {
|
||||
/*
|
||||
* If we expanded the last AG, free the per-AG reservation
|
||||
* so we can reinitialize it with the new size.
|
||||
*/
|
||||
if (lastag_extended) {
|
||||
struct xfs_perag *pag;
|
||||
|
||||
pag = xfs_perag_get(mp, id.agno);
|
||||
error = xfs_ag_resv_free(pag);
|
||||
xfs_perag_put(pag);
|
||||
if (error)
|
||||
return error;
|
||||
pag = xfs_perag_get(mp, id.agno);
|
||||
error = xfs_ag_resv_free(pag);
|
||||
xfs_perag_put(pag);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
/*
|
||||
* Reserve AG metadata blocks. ENOSPC here does not mean there
|
||||
* was a growfs failure, just that there still isn't space for
|
||||
* new user data after the grow has been run.
|
||||
*/
|
||||
error = xfs_fs_reserve_ag_blocks(mp);
|
||||
if (error == -ENOSPC)
|
||||
error = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reserve AG metadata blocks. ENOSPC here does not mean there was a
|
||||
* growfs failure, just that there still isn't space for new user data
|
||||
* after the grow has been run.
|
||||
*/
|
||||
error = xfs_fs_reserve_ag_blocks(mp);
|
||||
if (error == -ENOSPC)
|
||||
error = 0;
|
||||
return error;
|
||||
|
||||
out_trans_cancel:
|
||||
|
|
|
@ -63,7 +63,9 @@ xfs_inode_alloc(
|
|||
memset(&ip->i_df, 0, sizeof(ip->i_df));
|
||||
ip->i_flags = 0;
|
||||
ip->i_delayed_blks = 0;
|
||||
memset(&ip->i_d, 0, sizeof(ip->i_d));
|
||||
ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
|
||||
ip->i_nblocks = 0;
|
||||
ip->i_forkoff = 0;
|
||||
ip->i_sick = 0;
|
||||
ip->i_checked = 0;
|
||||
INIT_WORK(&ip->i_ioend_work, xfs_end_io);
|
||||
|
@ -307,7 +309,7 @@ xfs_iget_check_free_state(
|
|||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
if (ip->i_d.di_nblocks != 0) {
|
||||
if (ip->i_nblocks != 0) {
|
||||
xfs_warn(ip->i_mount,
|
||||
"Corruption detected! Free inode 0x%llx has blocks allocated!",
|
||||
ip->i_ino);
|
||||
|
@ -497,7 +499,7 @@ xfs_iget_cache_miss(
|
|||
* simply build the new inode core with a random generation number.
|
||||
*
|
||||
* For version 4 (and older) superblocks, log recovery is dependent on
|
||||
* the di_flushiter field being initialised from the current on-disk
|
||||
* the i_flushiter field being initialised from the current on-disk
|
||||
* value and hence we must also read the inode off disk even when
|
||||
* initializing new inodes.
|
||||
*/
|
||||
|
@ -505,14 +507,14 @@ xfs_iget_cache_miss(
|
|||
(flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
|
||||
VFS_I(ip)->i_generation = prandom_u32();
|
||||
} else {
|
||||
struct xfs_dinode *dip;
|
||||
struct xfs_buf *bp;
|
||||
|
||||
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
|
||||
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
|
||||
if (error)
|
||||
goto out_destroy;
|
||||
|
||||
error = xfs_inode_from_disk(ip, dip);
|
||||
error = xfs_inode_from_disk(ip,
|
||||
xfs_buf_offset(bp, ip->i_imap.im_boffset));
|
||||
if (!error)
|
||||
xfs_buf_set_ref(bp, XFS_INO_REF);
|
||||
xfs_trans_brelse(tp, bp);
|
||||
|
@ -1202,7 +1204,7 @@ xfs_inode_match_id(
|
|||
return false;
|
||||
|
||||
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
|
||||
ip->i_d.di_projid != eofb->eof_prid)
|
||||
ip->i_projid != eofb->eof_prid)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
@ -1226,7 +1228,7 @@ xfs_inode_match_id_union(
|
|||
return true;
|
||||
|
||||
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
|
||||
ip->i_d.di_projid == eofb->eof_prid)
|
||||
ip->i_projid == eofb->eof_prid)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -1294,13 +1296,6 @@ xfs_inode_free_eofblocks(
|
|||
if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
|
||||
return 0;
|
||||
|
||||
if (!xfs_can_free_eofblocks(ip, false)) {
|
||||
/* inode could be preallocated or append-only */
|
||||
trace_xfs_inode_free_eofblocks_invalid(ip);
|
||||
xfs_inode_clear_eofblocks_tag(ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the mapping is dirty the operation can block and wait for some
|
||||
* time. Unless we are waiting, skip it.
|
||||
|
@ -1322,7 +1317,13 @@ xfs_inode_free_eofblocks(
|
|||
}
|
||||
*lockflags |= XFS_IOLOCK_EXCL;
|
||||
|
||||
return xfs_free_eofblocks(ip);
|
||||
if (xfs_can_free_eofblocks(ip, false))
|
||||
return xfs_free_eofblocks(ip);
|
||||
|
||||
/* inode could be preallocated or append-only */
|
||||
trace_xfs_inode_free_eofblocks_invalid(ip);
|
||||
xfs_inode_clear_eofblocks_tag(ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1335,7 +1336,7 @@ xfs_blockgc_queue(
|
|||
{
|
||||
rcu_read_lock();
|
||||
if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
|
||||
queue_delayed_work(pag->pag_mount->m_blockgc_workqueue,
|
||||
queue_delayed_work(pag->pag_mount->m_gc_workqueue,
|
||||
&pag->pag_blockgc_work,
|
||||
msecs_to_jiffies(xfs_blockgc_secs * 1000));
|
||||
rcu_read_unlock();
|
||||
|
|
|
@ -60,8 +60,8 @@ xfs_get_extsz_hint(
|
|||
*/
|
||||
if (xfs_is_always_cow_inode(ip))
|
||||
return 0;
|
||||
if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
|
||||
return ip->i_d.di_extsize;
|
||||
if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
|
||||
return ip->i_extsize;
|
||||
if (XFS_IS_REALTIME_INODE(ip))
|
||||
return ip->i_mount->m_sb.sb_rextsize;
|
||||
return 0;
|
||||
|
@ -80,8 +80,8 @@ xfs_get_cowextsz_hint(
|
|||
xfs_extlen_t a, b;
|
||||
|
||||
a = 0;
|
||||
if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
a = ip->i_d.di_cowextsize;
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
a = ip->i_cowextsize;
|
||||
b = xfs_get_extsz_hint(ip);
|
||||
|
||||
a = max(a, b);
|
||||
|
@ -111,8 +111,7 @@ xfs_ilock_data_map_shared(
|
|||
{
|
||||
uint lock_mode = XFS_ILOCK_SHARED;
|
||||
|
||||
if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
|
||||
(ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
|
||||
if (xfs_need_iread_extents(&ip->i_df))
|
||||
lock_mode = XFS_ILOCK_EXCL;
|
||||
xfs_ilock(ip, lock_mode);
|
||||
return lock_mode;
|
||||
|
@ -124,9 +123,7 @@ xfs_ilock_attr_map_shared(
|
|||
{
|
||||
uint lock_mode = XFS_ILOCK_SHARED;
|
||||
|
||||
if (ip->i_afp &&
|
||||
ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
|
||||
(ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
|
||||
if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
|
||||
lock_mode = XFS_ILOCK_EXCL;
|
||||
xfs_ilock(ip, lock_mode);
|
||||
return lock_mode;
|
||||
|
@ -598,65 +595,53 @@ xfs_lock_two_inodes(
|
|||
}
|
||||
}
|
||||
|
||||
STATIC uint
|
||||
_xfs_dic2xflags(
|
||||
uint16_t di_flags,
|
||||
uint64_t di_flags2,
|
||||
bool has_attr)
|
||||
{
|
||||
uint flags = 0;
|
||||
|
||||
if (di_flags & XFS_DIFLAG_ANY) {
|
||||
if (di_flags & XFS_DIFLAG_REALTIME)
|
||||
flags |= FS_XFLAG_REALTIME;
|
||||
if (di_flags & XFS_DIFLAG_PREALLOC)
|
||||
flags |= FS_XFLAG_PREALLOC;
|
||||
if (di_flags & XFS_DIFLAG_IMMUTABLE)
|
||||
flags |= FS_XFLAG_IMMUTABLE;
|
||||
if (di_flags & XFS_DIFLAG_APPEND)
|
||||
flags |= FS_XFLAG_APPEND;
|
||||
if (di_flags & XFS_DIFLAG_SYNC)
|
||||
flags |= FS_XFLAG_SYNC;
|
||||
if (di_flags & XFS_DIFLAG_NOATIME)
|
||||
flags |= FS_XFLAG_NOATIME;
|
||||
if (di_flags & XFS_DIFLAG_NODUMP)
|
||||
flags |= FS_XFLAG_NODUMP;
|
||||
if (di_flags & XFS_DIFLAG_RTINHERIT)
|
||||
flags |= FS_XFLAG_RTINHERIT;
|
||||
if (di_flags & XFS_DIFLAG_PROJINHERIT)
|
||||
flags |= FS_XFLAG_PROJINHERIT;
|
||||
if (di_flags & XFS_DIFLAG_NOSYMLINKS)
|
||||
flags |= FS_XFLAG_NOSYMLINKS;
|
||||
if (di_flags & XFS_DIFLAG_EXTSIZE)
|
||||
flags |= FS_XFLAG_EXTSIZE;
|
||||
if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
|
||||
flags |= FS_XFLAG_EXTSZINHERIT;
|
||||
if (di_flags & XFS_DIFLAG_NODEFRAG)
|
||||
flags |= FS_XFLAG_NODEFRAG;
|
||||
if (di_flags & XFS_DIFLAG_FILESTREAM)
|
||||
flags |= FS_XFLAG_FILESTREAM;
|
||||
}
|
||||
|
||||
if (di_flags2 & XFS_DIFLAG2_ANY) {
|
||||
if (di_flags2 & XFS_DIFLAG2_DAX)
|
||||
flags |= FS_XFLAG_DAX;
|
||||
if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
flags |= FS_XFLAG_COWEXTSIZE;
|
||||
}
|
||||
|
||||
if (has_attr)
|
||||
flags |= FS_XFLAG_HASATTR;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
uint
|
||||
xfs_ip2xflags(
|
||||
struct xfs_inode *ip)
|
||||
{
|
||||
struct xfs_icdinode *dic = &ip->i_d;
|
||||
uint flags = 0;
|
||||
|
||||
return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
|
||||
if (ip->i_diflags & XFS_DIFLAG_ANY) {
|
||||
if (ip->i_diflags & XFS_DIFLAG_REALTIME)
|
||||
flags |= FS_XFLAG_REALTIME;
|
||||
if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
|
||||
flags |= FS_XFLAG_PREALLOC;
|
||||
if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
|
||||
flags |= FS_XFLAG_IMMUTABLE;
|
||||
if (ip->i_diflags & XFS_DIFLAG_APPEND)
|
||||
flags |= FS_XFLAG_APPEND;
|
||||
if (ip->i_diflags & XFS_DIFLAG_SYNC)
|
||||
flags |= FS_XFLAG_SYNC;
|
||||
if (ip->i_diflags & XFS_DIFLAG_NOATIME)
|
||||
flags |= FS_XFLAG_NOATIME;
|
||||
if (ip->i_diflags & XFS_DIFLAG_NODUMP)
|
||||
flags |= FS_XFLAG_NODUMP;
|
||||
if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
|
||||
flags |= FS_XFLAG_RTINHERIT;
|
||||
if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
|
||||
flags |= FS_XFLAG_PROJINHERIT;
|
||||
if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
|
||||
flags |= FS_XFLAG_NOSYMLINKS;
|
||||
if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
|
||||
flags |= FS_XFLAG_EXTSIZE;
|
||||
if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
|
||||
flags |= FS_XFLAG_EXTSZINHERIT;
|
||||
if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
|
||||
flags |= FS_XFLAG_NODEFRAG;
|
||||
if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
|
||||
flags |= FS_XFLAG_FILESTREAM;
|
||||
}
|
||||
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
|
||||
flags |= FS_XFLAG_DAX;
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
flags |= FS_XFLAG_COWEXTSIZE;
|
||||
}
|
||||
|
||||
if (XFS_IFORK_Q(ip))
|
||||
flags |= FS_XFLAG_HASATTR;
|
||||
return flags;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -708,42 +693,42 @@ xfs_inode_inherit_flags(
|
|||
umode_t mode = VFS_I(ip)->i_mode;
|
||||
|
||||
if (S_ISDIR(mode)) {
|
||||
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
|
||||
if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
|
||||
di_flags |= XFS_DIFLAG_RTINHERIT;
|
||||
if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
|
||||
if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
|
||||
di_flags |= XFS_DIFLAG_EXTSZINHERIT;
|
||||
ip->i_d.di_extsize = pip->i_d.di_extsize;
|
||||
ip->i_extsize = pip->i_extsize;
|
||||
}
|
||||
if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
|
||||
if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
|
||||
di_flags |= XFS_DIFLAG_PROJINHERIT;
|
||||
} else if (S_ISREG(mode)) {
|
||||
if ((pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) &&
|
||||
if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
|
||||
xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
|
||||
di_flags |= XFS_DIFLAG_REALTIME;
|
||||
if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
|
||||
if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
|
||||
di_flags |= XFS_DIFLAG_EXTSIZE;
|
||||
ip->i_d.di_extsize = pip->i_d.di_extsize;
|
||||
ip->i_extsize = pip->i_extsize;
|
||||
}
|
||||
}
|
||||
if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
|
||||
if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
|
||||
xfs_inherit_noatime)
|
||||
di_flags |= XFS_DIFLAG_NOATIME;
|
||||
if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
|
||||
if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
|
||||
xfs_inherit_nodump)
|
||||
di_flags |= XFS_DIFLAG_NODUMP;
|
||||
if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
|
||||
if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
|
||||
xfs_inherit_sync)
|
||||
di_flags |= XFS_DIFLAG_SYNC;
|
||||
if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
|
||||
if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
|
||||
xfs_inherit_nosymlinks)
|
||||
di_flags |= XFS_DIFLAG_NOSYMLINKS;
|
||||
if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
|
||||
if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
|
||||
xfs_inherit_nodefrag)
|
||||
di_flags |= XFS_DIFLAG_NODEFRAG;
|
||||
if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
|
||||
if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
|
||||
di_flags |= XFS_DIFLAG_FILESTREAM;
|
||||
|
||||
ip->i_d.di_flags |= di_flags;
|
||||
ip->i_diflags |= di_flags;
|
||||
}
|
||||
|
||||
/* Propagate di_flags2 from a parent inode to a child inode. */
|
||||
|
@ -752,12 +737,12 @@ xfs_inode_inherit_flags2(
|
|||
struct xfs_inode *ip,
|
||||
const struct xfs_inode *pip)
|
||||
{
|
||||
if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
|
||||
ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
|
||||
ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
|
||||
if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
|
||||
ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
|
||||
ip->i_cowextsize = pip->i_cowextsize;
|
||||
}
|
||||
if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
|
||||
ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
|
||||
if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
|
||||
ip->i_diflags2 |= XFS_DIFLAG2_DAX;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -774,6 +759,7 @@ xfs_init_new_inode(
|
|||
xfs_nlink_t nlink,
|
||||
dev_t rdev,
|
||||
prid_t prid,
|
||||
bool init_xattrs,
|
||||
struct xfs_inode **ipp)
|
||||
{
|
||||
struct inode *dir = pip ? VFS_I(pip) : NULL;
|
||||
|
@ -808,7 +794,7 @@ xfs_init_new_inode(
|
|||
inode = VFS_I(ip);
|
||||
set_nlink(inode, nlink);
|
||||
inode->i_rdev = rdev;
|
||||
ip->i_d.di_projid = prid;
|
||||
ip->i_projid = prid;
|
||||
|
||||
if (dir && !(dir->i_mode & S_ISGID) &&
|
||||
(mp->m_flags & XFS_MOUNT_GRPID)) {
|
||||
|
@ -829,25 +815,22 @@ xfs_init_new_inode(
|
|||
!in_group_p(i_gid_into_mnt(mnt_userns, inode)))
|
||||
inode->i_mode &= ~S_ISGID;
|
||||
|
||||
ip->i_d.di_size = 0;
|
||||
ip->i_disk_size = 0;
|
||||
ip->i_df.if_nextents = 0;
|
||||
ASSERT(ip->i_d.di_nblocks == 0);
|
||||
ASSERT(ip->i_nblocks == 0);
|
||||
|
||||
tv = current_time(inode);
|
||||
inode->i_mtime = tv;
|
||||
inode->i_atime = tv;
|
||||
inode->i_ctime = tv;
|
||||
|
||||
ip->i_d.di_extsize = 0;
|
||||
ip->i_d.di_dmevmask = 0;
|
||||
ip->i_d.di_dmstate = 0;
|
||||
ip->i_d.di_flags = 0;
|
||||
ip->i_extsize = 0;
|
||||
ip->i_diflags = 0;
|
||||
|
||||
if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
|
||||
inode_set_iversion(inode, 1);
|
||||
ip->i_d.di_flags2 = mp->m_ino_geo.new_diflags2;
|
||||
ip->i_d.di_cowextsize = 0;
|
||||
ip->i_d.di_crtime = tv;
|
||||
ip->i_cowextsize = 0;
|
||||
ip->i_crtime = tv;
|
||||
}
|
||||
|
||||
flags = XFS_ILOG_CORE;
|
||||
|
@ -857,19 +840,17 @@ xfs_init_new_inode(
|
|||
case S_IFBLK:
|
||||
case S_IFSOCK:
|
||||
ip->i_df.if_format = XFS_DINODE_FMT_DEV;
|
||||
ip->i_df.if_flags = 0;
|
||||
flags |= XFS_ILOG_DEV;
|
||||
break;
|
||||
case S_IFREG:
|
||||
case S_IFDIR:
|
||||
if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY))
|
||||
if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
|
||||
xfs_inode_inherit_flags(ip, pip);
|
||||
if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY))
|
||||
if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
|
||||
xfs_inode_inherit_flags2(ip, pip);
|
||||
/* FALLTHROUGH */
|
||||
case S_IFLNK:
|
||||
ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
ip->i_df.if_flags = XFS_IFEXTENTS;
|
||||
ip->i_df.if_bytes = 0;
|
||||
ip->i_df.if_u1.if_root = NULL;
|
||||
break;
|
||||
|
@ -877,6 +858,20 @@ xfs_init_new_inode(
|
|||
ASSERT(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* If we need to create attributes immediately after allocating the
|
||||
* inode, initialise an empty attribute fork right now. We use the
|
||||
* default fork offset for attributes here as we don't know exactly what
|
||||
* size or how many attributes we might be adding. We can do this
|
||||
* safely here because we know the data fork is completely empty and
|
||||
* this saves us from needing to run a separate transaction to set the
|
||||
* fork offset in the immediate future.
|
||||
*/
|
||||
if (init_xattrs && xfs_sb_version_hasattr(&mp->m_sb)) {
|
||||
ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
|
||||
ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Log the new values stuffed into the inode.
|
||||
*/
|
||||
|
@ -910,6 +905,7 @@ xfs_dir_ialloc(
|
|||
xfs_nlink_t nlink,
|
||||
dev_t rdev,
|
||||
prid_t prid,
|
||||
bool init_xattrs,
|
||||
struct xfs_inode **ipp)
|
||||
{
|
||||
struct xfs_buf *agibp;
|
||||
|
@ -937,7 +933,7 @@ xfs_dir_ialloc(
|
|||
ASSERT(ino != NULLFSINO);
|
||||
|
||||
return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
|
||||
prid, ipp);
|
||||
prid, init_xattrs, ipp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -982,6 +978,7 @@ xfs_create(
|
|||
struct xfs_name *name,
|
||||
umode_t mode,
|
||||
dev_t rdev,
|
||||
bool init_xattrs,
|
||||
xfs_inode_t **ipp)
|
||||
{
|
||||
int is_dir = S_ISDIR(mode);
|
||||
|
@ -1053,7 +1050,7 @@ xfs_create(
|
|||
* pointing to itself.
|
||||
*/
|
||||
error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
|
||||
prid, &ip);
|
||||
prid, init_xattrs, &ip);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
|
@ -1173,7 +1170,8 @@ xfs_create_tmpfile(
|
|||
if (error)
|
||||
goto out_release_dquots;
|
||||
|
||||
error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid, &ip);
|
||||
error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid,
|
||||
false, &ip);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
|
@ -1272,8 +1270,8 @@ xfs_link(
|
|||
* creation in our tree when the project IDs are the same; else
|
||||
* the tree quota mechanism could be circumvented.
|
||||
*/
|
||||
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
|
||||
tdp->i_d.di_projid != sip->i_d.di_projid)) {
|
||||
if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
|
||||
tdp->i_projid != sip->i_projid)) {
|
||||
error = -EXDEV;
|
||||
goto error_return;
|
||||
}
|
||||
|
@ -1331,7 +1329,7 @@ xfs_itruncate_clear_reflink_flags(
|
|||
dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
|
||||
cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
|
||||
if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
|
||||
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
if (cfork->if_bytes == 0)
|
||||
xfs_inode_clear_cowblocks_tag(ip);
|
||||
}
|
||||
|
@ -1442,7 +1440,7 @@ xfs_release(
|
|||
xfs_inode_t *ip)
|
||||
{
|
||||
xfs_mount_t *mp = ip->i_mount;
|
||||
int error;
|
||||
int error = 0;
|
||||
|
||||
if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
|
||||
return 0;
|
||||
|
@ -1478,8 +1476,16 @@ xfs_release(
|
|||
if (VFS_I(ip)->i_nlink == 0)
|
||||
return 0;
|
||||
|
||||
if (xfs_can_free_eofblocks(ip, false)) {
|
||||
/*
|
||||
* If we can't get the iolock just skip truncating the blocks past EOF
|
||||
* because we could deadlock with the mmap_lock otherwise. We'll get
|
||||
* another chance to drop them once the last reference to the inode is
|
||||
* dropped, so we'll never leak blocks permanently.
|
||||
*/
|
||||
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
|
||||
return 0;
|
||||
|
||||
if (xfs_can_free_eofblocks(ip, false)) {
|
||||
/*
|
||||
* Check if the inode is being opened, written and closed
|
||||
* frequently and we have delayed allocation blocks outstanding
|
||||
|
@ -1495,26 +1501,20 @@ xfs_release(
|
|||
* place.
|
||||
*/
|
||||
if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
|
||||
return 0;
|
||||
/*
|
||||
* If we can't get the iolock just skip truncating the blocks
|
||||
* past EOF because we could deadlock with the mmap_lock
|
||||
* otherwise. We'll get another chance to drop them once the
|
||||
* last reference to the inode is dropped, so we'll never leak
|
||||
* blocks permanently.
|
||||
*/
|
||||
if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
|
||||
error = xfs_free_eofblocks(ip);
|
||||
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
goto out_unlock;
|
||||
|
||||
error = xfs_free_eofblocks(ip);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
/* delalloc blocks after truncation means it really is dirty */
|
||||
if (ip->i_delayed_blks)
|
||||
xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
|
||||
}
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1543,7 +1543,7 @@ xfs_inactive_truncate(
|
|||
* of a system crash before the truncate completes. See the related
|
||||
* comment in xfs_vn_setattr_size() for details.
|
||||
*/
|
||||
ip->i_d.di_size = 0;
|
||||
ip->i_disk_size = 0;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
|
||||
|
@@ -1697,6 +1697,10 @@ xfs_inactive(
if (mp->m_flags & XFS_MOUNT_RDONLY)
return;

+/* Metadata inodes require explicit resource cleanup. */
+if (xfs_is_metadata_inode(ip))
+return;
+
/* Try to clean out the cow blocks if there are any. */
if (xfs_inode_has_cow_data(ip))
xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

@@ -1718,7 +1722,7 @@ xfs_inactive(
}

if (S_ISREG(VFS_I(ip)->i_mode) &&
-(ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
+(ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
truncate = 1;

@@ -1745,7 +1749,7 @@ xfs_inactive(
}

ASSERT(!ip->i_afp);
-ASSERT(ip->i_d.di_forkoff == 0);
+ASSERT(ip->i_forkoff == 0);

/*
* Free the inode.

@@ -2053,9 +2057,10 @@ xfs_iunlink_update_inode(

ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));

-error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
+error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
if (error)
return error;
+dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);

/* Make sure the old pointer isn't garbage. */
old_value = be32_to_cpu(dip->di_next_unlinked);

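xfs_imap_to_bp() no longer hands back a dinode pointer; the call sites above and below now derive it themselves with xfs_buf_offset(). The underlying operation is pointer arithmetic into a buffer plus an endian conversion when an on-disk field is read. A rough standalone illustration (made-up names, ntohl() standing in for be32_to_cpu(), not the kernel helpers):

#include <arpa/inet.h>	/* ntohl(), htonl() */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_dinode {
	uint32_t di_magic;
	uint32_t di_next_unlinked;	/* stored big-endian on disk */
};

/* Typed view of the inode that starts @boffset bytes into the buffer. */
static struct demo_dinode *demo_buf_offset(void *buf, size_t boffset)
{
	return (struct demo_dinode *)((char *)buf + boffset);
}

int main(void)
{
	unsigned char buf[512] = { 0 };
	struct demo_dinode *dip = demo_buf_offset(buf, 256);

	dip->di_next_unlinked = htonl(7);	/* pretend this came off disk */
	printf("next unlinked agino: %u\n", ntohl(dip->di_next_unlinked));
	return 0;
}
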
@@ -2180,13 +2185,14 @@ xfs_iunlink_map_ino(
return error;
}

-error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
+error = xfs_imap_to_bp(mp, tp, imap, bpp);
if (error) {
xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
__func__, error);
return error;
}

+*dipp = xfs_buf_offset(*bpp, imap->im_boffset);
return 0;
}

@ -2564,8 +2570,8 @@ xfs_ifree(
|
|||
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
|
||||
ASSERT(VFS_I(ip)->i_nlink == 0);
|
||||
ASSERT(ip->i_df.if_nextents == 0);
|
||||
ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
|
||||
ASSERT(ip->i_d.di_nblocks == 0);
|
||||
ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
|
||||
ASSERT(ip->i_nblocks == 0);
|
||||
|
||||
/*
|
||||
* Pull the on-disk inode from the AGI unlinked list.
|
||||
|
@ -2590,11 +2596,12 @@ xfs_ifree(
|
|||
}
|
||||
|
||||
VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
|
||||
ip->i_d.di_flags = 0;
|
||||
ip->i_d.di_flags2 = ip->i_mount->m_ino_geo.new_diflags2;
|
||||
ip->i_d.di_dmevmask = 0;
|
||||
ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
|
||||
ip->i_diflags = 0;
|
||||
ip->i_diflags2 = ip->i_mount->m_ino_geo.new_diflags2;
|
||||
ip->i_forkoff = 0; /* mark the attr fork not in use */
|
||||
ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
|
||||
if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
|
||||
xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
|
||||
|
||||
/* Don't attempt to replay owner changes for a deleted inode */
|
||||
spin_lock(&iip->ili_lock);
|
||||
|
@ -2870,7 +2877,7 @@ xfs_finish_rename(
|
|||
/*
|
||||
* xfs_cross_rename()
|
||||
*
|
||||
* responsible for handling RENAME_EXCHANGE flag in renameat2() sytemcall
|
||||
* responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
|
||||
*/
|
||||
STATIC int
|
||||
xfs_cross_rename(
|
||||
|
@ -3103,8 +3110,8 @@ xfs_rename(
|
|||
* into our tree when the project IDs are the same; else the
|
||||
* tree quota mechanism would be circumvented.
|
||||
*/
|
||||
if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
|
||||
target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
|
||||
if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
|
||||
target_dp->i_projid != src_ip->i_projid)) {
|
||||
error = -EXDEV;
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
@ -3414,34 +3421,33 @@ xfs_iflush(
|
|||
}
|
||||
}
|
||||
if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
|
||||
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
|
||||
ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: detected corrupt incore inode %Lu, "
|
||||
"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
|
||||
__func__, ip->i_ino,
|
||||
ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
|
||||
ip->i_d.di_nblocks, ip);
|
||||
ip->i_nblocks, ip);
|
||||
goto flush_out;
|
||||
}
|
||||
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
|
||||
if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
|
||||
mp, XFS_ERRTAG_IFLUSH_6)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
|
||||
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
|
||||
__func__, ip->i_ino, ip->i_forkoff, ip);
|
||||
goto flush_out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Inode item log recovery for v2 inodes are dependent on the
|
||||
* di_flushiter count for correct sequencing. We bump the flush
|
||||
* iteration count so we can detect flushes which postdate a log record
|
||||
* during recovery. This is redundant as we now log every change and
|
||||
* hence this can't happen but we need to still do it to ensure
|
||||
* backwards compatibility with old kernels that predate logging all
|
||||
* inode changes.
|
||||
* Inode item log recovery for v2 inodes are dependent on the flushiter
|
||||
* count for correct sequencing. We bump the flush iteration count so
|
||||
* we can detect flushes which postdate a log record during recovery.
|
||||
* This is redundant as we now log every change and hence this can't
|
||||
* happen but we need to still do it to ensure backwards compatibility
|
||||
* with old kernels that predate logging all inode changes.
|
||||
*/
|
||||
if (!xfs_sb_version_has_v3inode(&mp->m_sb))
|
||||
ip->i_d.di_flushiter++;
|
||||
ip->i_flushiter++;
|
||||
|
||||
/*
|
||||
* If there are inline format data / attr forks attached to this inode,
|
||||
|
@ -3462,8 +3468,10 @@ xfs_iflush(
|
|||
xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
|
||||
|
||||
/* Wrap, we never let the log put out DI_MAX_FLUSH */
|
||||
if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
|
||||
ip->i_d.di_flushiter = 0;
|
||||
if (!xfs_sb_version_has_v3inode(&mp->m_sb)) {
|
||||
if (ip->i_flushiter == DI_MAX_FLUSH)
|
||||
ip->i_flushiter = 0;
|
||||
}
|
||||
|
||||
xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
|
||||
if (XFS_IFORK_Q(ip))
|
||||
|
|
|
@@ -54,8 +54,19 @@ typedef struct xfs_inode {
/* Miscellaneous state. */
unsigned long i_flags; /* see defined flags below */
uint64_t i_delayed_blks; /* count of delay alloc blks */

-struct xfs_icdinode i_d; /* most of ondisk inode */
+xfs_fsize_t i_disk_size; /* number of bytes in file */
+xfs_rfsblock_t i_nblocks; /* # of direct & btree blocks */
+prid_t i_projid; /* owner's project id */
+xfs_extlen_t i_extsize; /* basic/minimum extent size */
+/* cowextsize is only used for v3 inodes, flushiter for v1/2 */
+union {
+xfs_extlen_t i_cowextsize; /* basic cow extent size */
+uint16_t i_flushiter; /* incremented on flush */
+};
+uint8_t i_forkoff; /* attr fork offset >> 3 */
+uint16_t i_diflags; /* XFS_DIFLAG_... */
+uint64_t i_diflags2; /* XFS_DIFLAG2_... */
+struct timespec64 i_crtime; /* time created */

/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */

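The new anonymous union above overlays i_cowextsize and i_flushiter because, per the comment, a given inode version only ever uses one of them: v3 inodes carry a COW extent size hint, v1/v2 inodes carry a flush iteration counter. The space-sharing trick is ordinary C; a tiny self-contained sketch of the same idea, with made-up names:

#include <stdint.h>
#include <stdio.h>

struct demo_inode {
	int version;			/* 2 or 3, decides which member is live */
	union {
		uint32_t cowextsize;	/* only meaningful for version 3 */
		uint16_t flushiter;	/* only meaningful for version 1/2 */
	};
};

int main(void)
{
	struct demo_inode v2 = { .version = 2, .flushiter = 7 };
	struct demo_inode v3 = { .version = 3, .cowextsize = 128 };

	/* Both members occupy the same storage; only the live one is read. */
	printf("v2 flushiter=%u v3 cowextsize=%u size=%zu\n",
	       v2.flushiter, v3.cowextsize, sizeof(struct demo_inode));
	return 0;
}
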
@@ -87,7 +98,7 @@ static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
{
if (S_ISREG(VFS_I(ip)->i_mode))
return i_size_read(VFS_I(ip));
-return ip->i_d.di_size;
+return ip->i_disk_size;
}

/*
@@ -101,7 +112,7 @@ xfs_new_eof(struct xfs_inode *ip, xfs_fsize_t new_size)

if (new_size > i_size || new_size < 0)
new_size = i_size;
-return new_size > ip->i_d.di_size ? new_size : 0;
+return new_size > ip->i_disk_size ? new_size : 0;
}

/*
@@ -174,15 +185,23 @@ xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
static inline prid_t
xfs_get_initial_prid(struct xfs_inode *dp)
{
-if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
-return dp->i_d.di_projid;
+if (dp->i_diflags & XFS_DIFLAG_PROJINHERIT)
+return dp->i_projid;

return XFS_PROJID_DEFAULT;
}

static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
{
-return ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
+return ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
}

+static inline bool xfs_is_metadata_inode(struct xfs_inode *ip)
+{
+struct xfs_mount *mp = ip->i_mount;
+
+return ip == mp->m_rbmip || ip == mp->m_rsumip ||
+xfs_is_quota_inode(&mp->m_sb, ip->i_ino);
+}
+
/*
@ -196,7 +215,7 @@ static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
|
|||
|
||||
static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
|
||||
{
|
||||
return ip->i_d.di_flags2 & XFS_DIFLAG2_BIGTIME;
|
||||
return ip->i_diflags2 & XFS_DIFLAG2_BIGTIME;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -214,6 +233,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
|
|||
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
|
||||
#define __XFS_INEW_BIT 3 /* inode has just been allocated */
|
||||
#define XFS_INEW (1 << __XFS_INEW_BIT)
|
||||
#define XFS_IPRESERVE_DM_FIELDS (1 << 4) /* has legacy DMAPI fields set */
|
||||
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
|
||||
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
|
||||
#define XFS_IFLUSHING (1 << 7) /* inode is being flushed */
|
||||
|
@ -371,7 +391,8 @@ int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
|
|||
struct xfs_inode **ipp, struct xfs_name *ci_name);
|
||||
int xfs_create(struct user_namespace *mnt_userns,
|
||||
struct xfs_inode *dp, struct xfs_name *name,
|
||||
umode_t mode, dev_t rdev, struct xfs_inode **ipp);
|
||||
umode_t mode, dev_t rdev, bool need_xattr,
|
||||
struct xfs_inode **ipp);
|
||||
int xfs_create_tmpfile(struct user_namespace *mnt_userns,
|
||||
struct xfs_inode *dp, umode_t mode,
|
||||
struct xfs_inode **ipp);
|
||||
|
@ -413,7 +434,8 @@ xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
|
|||
int xfs_dir_ialloc(struct user_namespace *mnt_userns,
|
||||
struct xfs_trans **tpp, struct xfs_inode *dp,
|
||||
umode_t mode, xfs_nlink_t nlink, dev_t dev,
|
||||
prid_t prid, struct xfs_inode **ipp);
|
||||
prid_t prid, bool need_xattr,
|
||||
struct xfs_inode **ipp);
|
||||
|
||||
static inline int
|
||||
xfs_itruncate_extents(
|
||||
|
|
|
@ -196,7 +196,7 @@ xfs_inode_item_format_data_fork(
|
|||
*/
|
||||
data_bytes = roundup(ip->i_df.if_bytes, 4);
|
||||
ASSERT(ip->i_df.if_u1.if_data != NULL);
|
||||
ASSERT(ip->i_d.di_size > 0);
|
||||
ASSERT(ip->i_disk_size > 0);
|
||||
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
|
||||
ip->i_df.if_u1.if_data, data_bytes);
|
||||
ilf->ilf_dsize = (unsigned)data_bytes;
|
||||
|
@@ -317,21 +317,47 @@ xfs_inode_to_log_dinode_ts(
return its;
}

+/*
+* The legacy DMAPI fields are only present in the on-disk and in-log inodes,
+* but not in the in-memory one. But we are guaranteed to have an inode buffer
+* in memory when logging an inode, so we can just copy it from the on-disk
+* inode to the in-log inode here so that recovery of file system with these
+* fields set to non-zero values doesn't lose them. For all other cases we zero
+* the fields.
+*/
+static void
+xfs_copy_dm_fields_to_log_dinode(
+struct xfs_inode *ip,
+struct xfs_log_dinode *to)
+{
+struct xfs_dinode *dip;
+
+dip = xfs_buf_offset(ip->i_itemp->ili_item.li_buf,
+ip->i_imap.im_boffset);
+
+if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) {
+to->di_dmevmask = be32_to_cpu(dip->di_dmevmask);
+to->di_dmstate = be16_to_cpu(dip->di_dmstate);
+} else {
+to->di_dmevmask = 0;
+to->di_dmstate = 0;
+}
+}
+
static void
xfs_inode_to_log_dinode(
struct xfs_inode *ip,
struct xfs_log_dinode *to,
xfs_lsn_t lsn)
{
-struct xfs_icdinode *from = &ip->i_d;
struct inode *inode = VFS_I(ip);

to->di_magic = XFS_DINODE_MAGIC;
to->di_format = xfs_ifork_format(&ip->i_df);
to->di_uid = i_uid_read(inode);
to->di_gid = i_gid_read(inode);
-to->di_projid_lo = from->di_projid & 0xffff;
-to->di_projid_hi = from->di_projid >> 16;
+to->di_projid_lo = ip->i_projid & 0xffff;
+to->di_projid_hi = ip->i_projid >> 16;

memset(to->di_pad, 0, sizeof(to->di_pad));
memset(to->di_pad3, 0, sizeof(to->di_pad3));
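The di_projid_lo/di_projid_hi assignments above split the 32-bit in-core project ID across the two 16-bit on-disk fields. As a quick standalone check of that packing arithmetic (plain C, illustrative names only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t projid = 0x00123456;	/* 32-bit in-core project id */
	uint16_t lo = projid & 0xffff;	/* low 16 bits, the on-disk _lo field */
	uint16_t hi = projid >> 16;	/* high 16 bits, the on-disk _hi field */

	/* Recombining the halves must give back the original value. */
	assert(((uint32_t)hi << 16 | lo) == projid);
	return 0;
}
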
@ -342,16 +368,16 @@ xfs_inode_to_log_dinode(
|
|||
to->di_gen = inode->i_generation;
|
||||
to->di_mode = inode->i_mode;
|
||||
|
||||
to->di_size = from->di_size;
|
||||
to->di_nblocks = from->di_nblocks;
|
||||
to->di_extsize = from->di_extsize;
|
||||
to->di_size = ip->i_disk_size;
|
||||
to->di_nblocks = ip->i_nblocks;
|
||||
to->di_extsize = ip->i_extsize;
|
||||
to->di_nextents = xfs_ifork_nextents(&ip->i_df);
|
||||
to->di_anextents = xfs_ifork_nextents(ip->i_afp);
|
||||
to->di_forkoff = from->di_forkoff;
|
||||
to->di_forkoff = ip->i_forkoff;
|
||||
to->di_aformat = xfs_ifork_format(ip->i_afp);
|
||||
to->di_dmevmask = from->di_dmevmask;
|
||||
to->di_dmstate = from->di_dmstate;
|
||||
to->di_flags = from->di_flags;
|
||||
to->di_flags = ip->i_diflags;
|
||||
|
||||
xfs_copy_dm_fields_to_log_dinode(ip, to);
|
||||
|
||||
/* log a dummy value to ensure log structure is fully initialised */
|
||||
to->di_next_unlinked = NULLAGINO;
|
||||
|
@ -359,9 +385,9 @@ xfs_inode_to_log_dinode(
|
|||
if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
|
||||
to->di_version = 3;
|
||||
to->di_changecount = inode_peek_iversion(inode);
|
||||
to->di_crtime = xfs_inode_to_log_dinode_ts(ip, from->di_crtime);
|
||||
to->di_flags2 = from->di_flags2;
|
||||
to->di_cowextsize = from->di_cowextsize;
|
||||
to->di_crtime = xfs_inode_to_log_dinode_ts(ip, ip->i_crtime);
|
||||
to->di_flags2 = ip->i_diflags2;
|
||||
to->di_cowextsize = ip->i_cowextsize;
|
||||
to->di_ino = ip->i_ino;
|
||||
to->di_lsn = lsn;
|
||||
memset(to->di_pad2, 0, sizeof(to->di_pad2));
|
||||
|
@ -369,7 +395,7 @@ xfs_inode_to_log_dinode(
|
|||
to->di_flushiter = 0;
|
||||
} else {
|
||||
to->di_version = 2;
|
||||
to->di_flushiter = from->di_flushiter;
|
||||
to->di_flushiter = ip->i_flushiter;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -1060,14 +1060,16 @@ xfs_fill_fsxattr(
int whichfork,
struct fileattr *fa)
{
+struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
-fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
-fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
-ip->i_mount->m_sb.sb_blocklog;
-fa->fsx_projid = ip->i_d.di_projid;
-if (ifp && (ifp->if_flags & XFS_IFEXTENTS))
+
+fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
+fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
+fa->fsx_projid = ip->i_projid;
+if (ifp && !xfs_need_iread_extents(ifp))
fa->fsx_nextents = xfs_iext_count(ifp);
else
fa->fsx_nextents = xfs_ifork_nextents(ifp);
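The XFS_FSB_TO_B() conversions above replace the open-coded "<< sb_blocklog" shifts; either way the math is a power-of-two block-size conversion. A standalone refresher on that arithmetic (illustrative macros, not the kernel ones; assumes 4096-byte blocks):

#include <assert.h>
#include <stdint.h>

/* A filesystem with 4096-byte blocks has a block log of 12 (4096 == 1 << 12). */
#define DEMO_BLOCKLOG	12

/* blocks -> bytes */
#define DEMO_FSB_TO_B(fsb)	((uint64_t)(fsb) << DEMO_BLOCKLOG)
/* bytes -> blocks, rounding up so partial blocks still count */
#define DEMO_B_TO_FSB(b)	(((uint64_t)(b) + (1U << DEMO_BLOCKLOG) - 1) >> DEMO_BLOCKLOG)

int main(void)
{
	assert(DEMO_FSB_TO_B(16) == 65536);	/* a 16-block extent size hint is 64KiB */
	assert(DEMO_B_TO_FSB(65536) == 16);
	assert(DEMO_B_TO_FSB(1) == 1);		/* one byte still occupies a whole block */
	return 0;
}
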
@ -1111,7 +1113,7 @@ xfs_flags2diflags(
|
|||
{
|
||||
/* can't set PREALLOC this way, just preserve it */
|
||||
uint16_t di_flags =
|
||||
(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
|
||||
(ip->i_diflags & XFS_DIFLAG_PREALLOC);
|
||||
|
||||
if (xflags & FS_XFLAG_IMMUTABLE)
|
||||
di_flags |= XFS_DIFLAG_IMMUTABLE;
|
||||
|
@ -1152,8 +1154,8 @@ xfs_flags2diflags2(
|
|||
unsigned int xflags)
|
||||
{
|
||||
uint64_t di_flags2 =
|
||||
(ip->i_d.di_flags2 & (XFS_DIFLAG2_REFLINK |
|
||||
XFS_DIFLAG2_BIGTIME));
|
||||
(ip->i_diflags2 & (XFS_DIFLAG2_REFLINK |
|
||||
XFS_DIFLAG2_BIGTIME));
|
||||
|
||||
if (xflags & FS_XFLAG_DAX)
|
||||
di_flags2 |= XFS_DIFLAG2_DAX;
|
||||
|
@ -1170,7 +1172,7 @@ xfs_ioctl_setattr_xflags(
|
|||
struct fileattr *fa)
|
||||
{
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
uint64_t di_flags2;
|
||||
uint64_t i_flags2;
|
||||
|
||||
/* Can't change realtime flag if any extents are allocated. */
|
||||
if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
|
||||
|
@ -1180,25 +1182,25 @@ xfs_ioctl_setattr_xflags(
|
|||
/* If realtime flag is set then must have realtime device */
|
||||
if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
|
||||
if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
|
||||
(ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
|
||||
(ip->i_extsize % mp->m_sb.sb_rextsize))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Clear reflink if we are actually able to set the rt flag. */
|
||||
if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
|
||||
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
|
||||
/* Don't allow us to set DAX mode for a reflinked file for now. */
|
||||
if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
|
||||
return -EINVAL;
|
||||
|
||||
/* diflags2 only valid for v3 inodes. */
|
||||
di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
|
||||
if (di_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
|
||||
i_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
|
||||
if (i_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
|
||||
return -EINVAL;
|
||||
|
||||
ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
|
||||
ip->i_d.di_flags2 = di_flags2;
|
||||
ip->i_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
|
||||
ip->i_diflags2 = i_flags2;
|
||||
|
||||
xfs_diflags_to_iflags(ip, false);
|
||||
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
|
||||
|
@ -1223,9 +1225,9 @@ xfs_ioctl_setattr_prepare_dax(
|
|||
return;
|
||||
|
||||
if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
|
||||
!(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) ||
|
||||
!(ip->i_diflags2 & XFS_DIFLAG2_DAX)) ||
|
||||
(!(fa->fsx_xflags & FS_XFLAG_DAX) &&
|
||||
(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)))
|
||||
(ip->i_diflags2 & XFS_DIFLAG2_DAX)))
|
||||
d_mark_dontcache(inode);
|
||||
}
|
||||
|
||||
|
@ -1293,7 +1295,7 @@ xfs_ioctl_setattr_check_extsize(
|
|||
return 0;
|
||||
|
||||
if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
|
||||
((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
|
||||
((ip->i_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
|
||||
return -EINVAL;
|
||||
|
||||
if (fa->fsx_extsize == 0)
|
||||
|
@ -1463,12 +1465,12 @@ xfs_fileattr_set(
|
|||
VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
|
||||
|
||||
/* Change the ownerships and register project quota modifications */
|
||||
if (ip->i_d.di_projid != fa->fsx_projid) {
|
||||
if (ip->i_projid != fa->fsx_projid) {
|
||||
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
|
||||
olddquot = xfs_qm_vop_chown(tp, ip,
|
||||
&ip->i_pdquot, pdqp);
|
||||
}
|
||||
ip->i_d.di_projid = fa->fsx_projid;
|
||||
ip->i_projid = fa->fsx_projid;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1476,16 +1478,17 @@ xfs_fileattr_set(
|
|||
* extent size hint should be set on the inode. If no extent size flags
|
||||
* are set on the inode then unconditionally clear the extent size hint.
|
||||
*/
|
||||
if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
|
||||
ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
|
||||
if (ip->i_diflags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
|
||||
ip->i_extsize = XFS_B_TO_FSB(mp, fa->fsx_extsize);
|
||||
else
|
||||
ip->i_d.di_extsize = 0;
|
||||
if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
|
||||
(ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
|
||||
ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
|
||||
mp->m_sb.sb_blocklog;
|
||||
else
|
||||
ip->i_d.di_cowextsize = 0;
|
||||
ip->i_extsize = 0;
|
||||
|
||||
if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
ip->i_cowextsize = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
|
||||
else
|
||||
ip->i_cowextsize = 0;
|
||||
}
|
||||
|
||||
skip_xattr:
|
||||
error = xfs_trans_commit(tp);
|
||||
|
@ -1543,8 +1546,6 @@ xfs_ioc_getbmap(
|
|||
bmx.bmv_iflags = BMV_IF_ATTRFORK;
|
||||
/*FALLTHRU*/
|
||||
case XFS_IOC_GETBMAP:
|
||||
if (file->f_mode & FMODE_NOCMTIME)
|
||||
bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
|
||||
/* struct getbmap is a strict subset of struct getbmapx. */
|
||||
recsize = sizeof(struct getbmap);
|
||||
break;
|
||||
|
@ -1720,7 +1721,7 @@ xfs_ioc_getfsmap(
|
|||
|
||||
STATIC int
|
||||
xfs_ioc_scrub_metadata(
|
||||
struct xfs_inode *ip,
|
||||
struct file *file,
|
||||
void __user *arg)
|
||||
{
|
||||
struct xfs_scrub_metadata scrub;
|
||||
|
@ -1732,7 +1733,7 @@ xfs_ioc_scrub_metadata(
|
|||
if (copy_from_user(&scrub, arg, sizeof(scrub)))
|
||||
return -EFAULT;
|
||||
|
||||
error = xfs_scrub_metadata(ip, &scrub);
|
||||
error = xfs_scrub_metadata(file, &scrub);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -2023,7 +2024,7 @@ xfs_file_ioctl(
|
|||
return xfs_ioc_getfsmap(ip, arg);
|
||||
|
||||
case XFS_IOC_SCRUB_METADATA:
|
||||
return xfs_ioc_scrub_metadata(ip, arg);
|
||||
return xfs_ioc_scrub_metadata(filp, arg);
|
||||
|
||||
case XFS_IOC_FD_TO_HANDLE:
|
||||
case XFS_IOC_PATH_TO_HANDLE:
|
||||
|
|
|
@ -159,7 +159,7 @@ xfs_iomap_eof_align_last_fsb(
|
|||
struct xfs_bmbt_irec irec;
|
||||
struct xfs_iext_cursor icur;
|
||||
|
||||
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
|
||||
ASSERT(!xfs_need_iread_extents(ifp));
|
||||
|
||||
/*
|
||||
* Always round up the allocation request to the extent hint boundary.
|
||||
|
@ -198,6 +198,7 @@ xfs_iomap_write_direct(
|
|||
bool force = false;
|
||||
int error;
|
||||
int bmapi_flags = XFS_BMAPI_PREALLOC;
|
||||
int nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;
|
||||
|
||||
ASSERT(count_fsb > 0);
|
||||
|
||||
|
@ -232,6 +233,7 @@ xfs_iomap_write_direct(
|
|||
bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
|
||||
if (imap->br_state == XFS_EXT_UNWRITTEN) {
|
||||
force = true;
|
||||
nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
|
||||
dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
|
||||
}
|
||||
}
|
||||
|
@ -241,8 +243,7 @@ xfs_iomap_write_direct(
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
|
||||
XFS_IEXT_ADD_NOSPLIT_CNT);
|
||||
error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, nr_exts);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
|
@ -572,7 +573,7 @@ xfs_iomap_write_unwritten(
|
|||
i_size_write(inode, i_size);
|
||||
i_size = xfs_new_eof(ip, i_size);
|
||||
if (i_size) {
|
||||
ip->i_d.di_size = i_size;
|
||||
ip->i_disk_size = i_size;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
|
||||
|
@ -666,7 +667,7 @@ xfs_ilock_for_iomap(
|
|||
* is an opencoded xfs_ilock_data_map_shared() call but with
|
||||
* non-blocking behaviour.
|
||||
*/
|
||||
if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
|
||||
if (xfs_need_iread_extents(&ip->i_df)) {
|
||||
if (flags & IOMAP_NOWAIT)
|
||||
return -EAGAIN;
|
||||
mode = XFS_ILOCK_EXCL;
|
||||
|
@ -893,11 +894,9 @@ xfs_buffered_write_iomap_begin(
|
|||
|
||||
XFS_STATS_INC(mp, xs_blk_mapw);
|
||||
|
||||
if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Search the data fork first to look up our source mapping. We
|
||||
|
@ -1208,11 +1207,9 @@ xfs_seek_iomap_begin(
|
|||
return -EIO;
|
||||
|
||||
lockmode = xfs_ilock_data_map_shared(ip);
|
||||
if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
|
||||
/*
|
||||
|
|
|
@@ -127,6 +127,37 @@ xfs_cleanup_inode(
xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}

+/*
+* Check to see if we are likely to need an extended attribute to be added to
+* the inode we are about to allocate. This allows the attribute fork to be
+* created during the inode allocation, reducing the number of transactions we
+* need to do in this fast path.
+*
+* The security checks are optimistic, but not guaranteed. The two LSMs that
+* require xattrs to be added here (selinux and smack) are also the only two
+* LSMs that add a sb->s_security structure to the superblock. Hence if security
+* is enabled and sb->s_security is set, we have a pretty good idea that we are
+* going to be asked to add a security xattr immediately after allocating the
+* xfs inode and instantiating the VFS inode.
+*/
+static inline bool
+xfs_create_need_xattr(
+struct inode *dir,
+struct posix_acl *default_acl,
+struct posix_acl *acl)
+{
+if (acl)
+return true;
+if (default_acl)
+return true;
+#if IS_ENABLED(CONFIG_SECURITY)
+if (dir->i_sb->s_security)
+return true;
+#endif
+return false;
+}
+

STATIC int
xfs_generic_create(
struct user_namespace *mnt_userns,

@@ -164,7 +195,8 @@ xfs_generic_create(

if (!tmpfile) {
error = xfs_create(mnt_userns, XFS_I(dir), &name, mode, rdev,
-&ip);
+xfs_create_need_xattr(dir, default_acl, acl),
+&ip);
} else {
error = xfs_create_tmpfile(mnt_userns, XFS_I(dir), mode, &ip);
}
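The helper above is deliberately a heuristic: it guesses at create time whether an ACL or security xattr is about to follow so the attr fork can be set up in the same transaction as the inode allocation. The general shape of that optimisation, reduced to a toy example (all names invented, nothing here is kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_file {
	char *data;
	char *xattr_area;	/* NULL until the first extended attribute */
};

/* Cheap, possibly-wrong guess about whether xattrs will be added soon. */
static bool demo_need_xattr_hint(bool has_acl, bool security_enabled)
{
	return has_acl || security_enabled;
}

static struct demo_file *demo_create(bool need_xattr)
{
	struct demo_file *f = calloc(1, sizeof(*f));

	if (!f)
		return NULL;
	/*
	 * Reserving the xattr area now saves a second "transaction" (here,
	 * a second allocation) when the attribute actually arrives; a wrong
	 * guess only costs a little unused space.
	 */
	if (need_xattr)
		f->xattr_area = calloc(1, 256);
	return f;
}

int main(void)
{
	struct demo_file *f = demo_create(demo_need_xattr_hint(false, true));

	printf("xattr area preallocated: %s\n", f && f->xattr_area ? "yes" : "no");
	if (f) {
		free(f->xattr_area);
		free(f);
	}
	return 0;
}
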
@ -488,7 +520,7 @@ xfs_vn_get_link_inline(
|
|||
struct xfs_inode *ip = XFS_I(inode);
|
||||
char *link;
|
||||
|
||||
ASSERT(ip->i_df.if_flags & XFS_IFINLINE);
|
||||
ASSERT(ip->i_df.if_format == XFS_DINODE_FMT_LOCAL);
|
||||
|
||||
/*
|
||||
* The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
|
||||
|
@ -563,13 +595,12 @@ xfs_vn_getattr(
|
|||
stat->atime = inode->i_atime;
|
||||
stat->mtime = inode->i_mtime;
|
||||
stat->ctime = inode->i_ctime;
|
||||
stat->blocks =
|
||||
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
|
||||
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
|
||||
|
||||
if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
|
||||
if (request_mask & STATX_BTIME) {
|
||||
stat->result_mask |= STATX_BTIME;
|
||||
stat->btime = ip->i_d.di_crtime;
|
||||
stat->btime = ip->i_crtime;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -577,11 +608,11 @@ xfs_vn_getattr(
|
|||
* Note: If you add another clause to set an attribute flag, please
|
||||
* update attributes_mask below.
|
||||
*/
|
||||
if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
|
||||
if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
|
||||
stat->attributes |= STATX_ATTR_IMMUTABLE;
|
||||
if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
|
||||
if (ip->i_diflags & XFS_DIFLAG_APPEND)
|
||||
stat->attributes |= STATX_ATTR_APPEND;
|
||||
if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
|
||||
if (ip->i_diflags & XFS_DIFLAG_NODUMP)
|
||||
stat->attributes |= STATX_ATTR_NODUMP;
|
||||
|
||||
stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
|
||||
|
@ -706,7 +737,7 @@ xfs_setattr_nonsize(
|
|||
*/
|
||||
ASSERT(udqp == NULL);
|
||||
ASSERT(gdqp == NULL);
|
||||
error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
|
||||
error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_projid,
|
||||
qflags, &udqp, &gdqp, NULL);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -918,8 +949,8 @@ xfs_setattr_size(
|
|||
* operation.
|
||||
*
|
||||
* And we update in-core i_size and truncate page cache beyond newsize
|
||||
* before writeback the [di_size, newsize] range, so we're guaranteed
|
||||
* not to write stale data past the new EOF on truncate down.
|
||||
* before writeback the [i_disk_size, newsize] range, so we're
|
||||
* guaranteed not to write stale data past the new EOF on truncate down.
|
||||
*/
|
||||
truncate_setsize(inode, newsize);
|
||||
|
||||
|
@ -932,9 +963,9 @@ xfs_setattr_size(
|
|||
* otherwise those blocks may not be zeroed after a crash.
|
||||
*/
|
||||
if (did_zeroing ||
|
||||
(newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
|
||||
(newsize > ip->i_disk_size && oldsize != ip->i_disk_size)) {
|
||||
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
|
||||
ip->i_d.di_size, newsize - 1);
|
||||
ip->i_disk_size, newsize - 1);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
@ -976,7 +1007,7 @@ xfs_setattr_size(
|
|||
* permanent before actually freeing any blocks it doesn't matter if
|
||||
* they get written to.
|
||||
*/
|
||||
ip->i_d.di_size = newsize;
|
||||
ip->i_disk_size = newsize;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
if (newsize <= oldsize) {
|
||||
|
@ -1262,7 +1293,7 @@ xfs_inode_should_enable_dax(
|
|||
return false;
|
||||
if (ip->i_mount->m_flags & XFS_MOUNT_DAX_ALWAYS)
|
||||
return true;
|
||||
if (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
@ -1319,7 +1350,7 @@ xfs_setup_inode(
|
|||
/* make the inode look hashed for the writeback code */
|
||||
inode_fake_hash(inode);
|
||||
|
||||
i_size_write(inode, ip->i_d.di_size);
|
||||
i_size_write(inode, ip->i_disk_size);
|
||||
xfs_diflags_to_iflags(ip, true);
|
||||
|
||||
if (S_ISDIR(inode->i_mode)) {
|
||||
|
@ -1377,7 +1408,7 @@ xfs_setup_iops(
|
|||
inode->i_fop = &xfs_dir_file_operations;
|
||||
break;
|
||||
case S_IFLNK:
|
||||
if (ip->i_df.if_flags & XFS_IFINLINE)
|
||||
if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL)
|
||||
inode->i_op = &xfs_inline_symlink_inode_operations;
|
||||
else
|
||||
inode->i_op = &xfs_symlink_inode_operations;
|
||||
|
|
|
@ -60,7 +60,6 @@ xfs_bulkstat_one_int(
|
|||
struct xfs_bstat_chunk *bc)
|
||||
{
|
||||
struct user_namespace *sb_userns = mp->m_super->s_user_ns;
|
||||
struct xfs_icdinode *dic; /* dinode core info pointer */
|
||||
struct xfs_inode *ip; /* incore inode pointer */
|
||||
struct inode *inode;
|
||||
struct xfs_bulkstat *buf = bc->buf;
|
||||
|
@ -81,16 +80,14 @@ xfs_bulkstat_one_int(
|
|||
ASSERT(ip->i_imap.im_blkno != 0);
|
||||
inode = VFS_I(ip);
|
||||
|
||||
dic = &ip->i_d;
|
||||
|
||||
/* xfs_iget returns the following without needing
|
||||
* further change.
|
||||
*/
|
||||
buf->bs_projectid = ip->i_d.di_projid;
|
||||
buf->bs_projectid = ip->i_projid;
|
||||
buf->bs_ino = ino;
|
||||
buf->bs_uid = from_kuid(sb_userns, i_uid_into_mnt(mnt_userns, inode));
|
||||
buf->bs_gid = from_kgid(sb_userns, i_gid_into_mnt(mnt_userns, inode));
|
||||
buf->bs_size = dic->di_size;
|
||||
buf->bs_size = ip->i_disk_size;
|
||||
|
||||
buf->bs_nlink = inode->i_nlink;
|
||||
buf->bs_atime = inode->i_atime.tv_sec;
|
||||
|
@ -99,13 +96,11 @@ xfs_bulkstat_one_int(
|
|||
buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
|
||||
buf->bs_ctime = inode->i_ctime.tv_sec;
|
||||
buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
|
||||
buf->bs_btime = dic->di_crtime.tv_sec;
|
||||
buf->bs_btime_nsec = dic->di_crtime.tv_nsec;
|
||||
buf->bs_gen = inode->i_generation;
|
||||
buf->bs_mode = inode->i_mode;
|
||||
|
||||
buf->bs_xflags = xfs_ip2xflags(ip);
|
||||
buf->bs_extsize_blks = dic->di_extsize;
|
||||
buf->bs_extsize_blks = ip->i_extsize;
|
||||
buf->bs_extents = xfs_ifork_nextents(&ip->i_df);
|
||||
xfs_bulkstat_health(ip, buf);
|
||||
buf->bs_aextents = xfs_ifork_nextents(ip->i_afp);
|
||||
|
@ -113,8 +108,10 @@ xfs_bulkstat_one_int(
|
|||
buf->bs_version = XFS_BULKSTAT_VERSION_V5;
|
||||
|
||||
if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
|
||||
if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
buf->bs_cowextsize_blks = dic->di_cowextsize;
|
||||
buf->bs_btime = ip->i_crtime.tv_sec;
|
||||
buf->bs_btime_nsec = ip->i_crtime.tv_nsec;
|
||||
if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
|
||||
buf->bs_cowextsize_blks = ip->i_cowextsize;
|
||||
}
|
||||
|
||||
switch (ip->i_df.if_format) {
|
||||
|
@ -132,7 +129,7 @@ xfs_bulkstat_one_int(
|
|||
case XFS_DINODE_FMT_BTREE:
|
||||
buf->bs_rdev = 0;
|
||||
buf->bs_blksize = mp->m_sb.sb_blocksize;
|
||||
buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
|
||||
buf->bs_blocks = ip->i_nblocks + ip->i_delayed_blks;
|
||||
break;
|
||||
}
|
||||
xfs_iunlock(ip, XFS_ILOCK_SHARED);
|
||||
|
|
|
@ -233,7 +233,7 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
|
|||
* configured realtime device.
|
||||
*/
|
||||
#define XFS_IS_REALTIME_INODE(ip) \
|
||||
(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
|
||||
(((ip)->i_diflags & XFS_DIFLAG_REALTIME) && \
|
||||
(ip)->i_mount->m_rtdev_targp)
|
||||
#define XFS_IS_REALTIME_MOUNT(mp) ((mp)->m_rtdev_targp ? 1 : 0)
|
||||
#else
|
||||
|
|
|
@ -2683,9 +2683,10 @@ xlog_recover_process_one_iunlink(
|
|||
/*
|
||||
* Get the on disk inode to find the next inode in the bucket.
|
||||
*/
|
||||
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
|
||||
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &ibp);
|
||||
if (error)
|
||||
goto fail_iput;
|
||||
dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);
|
||||
|
||||
xfs_iflags_clear(ip, XFS_IRECOVERY);
|
||||
ASSERT(VFS_I(ip)->i_nlink == 0);
|
||||
|
@ -2695,12 +2696,6 @@ xlog_recover_process_one_iunlink(
|
|||
agino = be32_to_cpu(dip->di_next_unlinked);
|
||||
xfs_buf_relse(ibp);
|
||||
|
||||
/*
|
||||
* Prevent any DMAPI event from being sent when the reference on
|
||||
* the inode is dropped.
|
||||
*/
|
||||
ip->i_d.di_dmevmask = 0;
|
||||
|
||||
xfs_irele(ip);
|
||||
return agino;
|
||||
|
||||
|
@ -2736,7 +2731,7 @@ xlog_recover_process_one_iunlink(
|
|||
* of log space.
|
||||
*
|
||||
* This behaviour is bad for latency on single CPU and non-preemptible kernels,
|
||||
* and can prevent other filesytem work (such as CIL pushes) from running. This
|
||||
* and can prevent other filesystem work (such as CIL pushes) from running. This
|
||||
* can lead to deadlocks if the recovery process runs out of log reservation
|
||||
* space. Hence we need to yield the CPU when there is other kernel work
|
||||
* scheduled on this CPU to ensure other scheduled work can run without undue
|
||||
|
@ -3404,7 +3399,7 @@ xlog_recover(
|
|||
|
||||
/*
|
||||
* Delay log recovery if the debug hook is set. This is debug
|
||||
* instrumention to coordinate simulation of I/O failures with
|
||||
* instrumentation to coordinate simulation of I/O failures with
|
||||
* log recovery.
|
||||
*/
|
||||
if (xfs_globals.log_recovery_delay) {
|
||||
|
|
|
@ -675,6 +675,18 @@ xfs_unmount_flush_inodes(
|
|||
xfs_health_unmount(mp);
|
||||
}
|
||||
|
||||
static void
|
||||
xfs_mount_setup_inode_geom(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
struct xfs_ino_geometry *igeo = M_IGEO(mp);
|
||||
|
||||
igeo->attr_fork_offset = xfs_bmap_compute_attr_offset(mp);
|
||||
ASSERT(igeo->attr_fork_offset < XFS_LITINO(mp));
|
||||
|
||||
xfs_ialloc_setup_geometry(mp);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function does the following on an initial mount of a file system:
|
||||
* - reads the superblock from disk and init the mount struct
|
||||
|
@ -758,7 +770,7 @@ xfs_mountfs(
|
|||
xfs_alloc_compute_maxlevels(mp);
|
||||
xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
|
||||
xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
|
||||
xfs_ialloc_setup_geometry(mp);
|
||||
xfs_mount_setup_inode_geom(mp);
|
||||
xfs_rmapbt_compute_maxlevels(mp);
|
||||
xfs_refcountbt_compute_maxlevels(mp);
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ typedef struct xfs_mount {
|
|||
struct workqueue_struct *m_unwritten_workqueue;
|
||||
struct workqueue_struct *m_cil_workqueue;
|
||||
struct workqueue_struct *m_reclaim_workqueue;
|
||||
struct workqueue_struct *m_blockgc_workqueue;
|
||||
struct workqueue_struct *m_gc_workqueue;
|
||||
struct workqueue_struct *m_sync_workqueue;
|
||||
|
||||
int m_bsize; /* fs logical block size */
|
||||
|
|
|
@ -286,7 +286,7 @@ xfs_fs_commit_blocks(
|
|||
xfs_setattr_time(ip, iattr);
|
||||
if (update_isize) {
|
||||
i_size_write(inode, iattr->ia_size);
|
||||
ip->i_d.di_size = iattr->ia_size;
|
||||
ip->i_disk_size = iattr->ia_size;
|
||||
}
|
||||
|
||||
xfs_trans_set_sync(tp);
|
||||
|
|
|
@ -788,7 +788,7 @@ xfs_qm_qino_alloc(
|
|||
|
||||
if (need_alloc) {
|
||||
error = xfs_dir_ialloc(&init_user_ns, &tp, NULL, S_IFREG, 1, 0,
|
||||
0, ipp);
|
||||
0, false, ipp);
|
||||
if (error) {
|
||||
xfs_trans_cancel(tp);
|
||||
return error;
|
||||
|
@ -992,7 +992,7 @@ xfs_qm_reset_dqcounts_buf(
|
|||
* trans_reserve. But, this gets called during quotacheck, and that
|
||||
* happens only at mount time which is single threaded.
|
||||
*/
|
||||
if (qip->i_d.di_nblocks == 0)
|
||||
if (qip->i_nblocks == 0)
|
||||
return 0;
|
||||
|
||||
map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
|
||||
|
@ -1165,16 +1165,14 @@ xfs_qm_dqusage_adjust(
|
|||
if (XFS_IS_REALTIME_INODE(ip)) {
|
||||
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto error0;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto error0;
|
||||
|
||||
xfs_bmap_count_leaves(ifp, &rtblks);
|
||||
}
|
||||
|
||||
nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
|
||||
nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
|
||||
|
||||
/*
|
||||
* Add the (disk blocks and inode) resources occupied by this
|
||||
|
@ -1716,7 +1714,7 @@ xfs_qm_vop_dqalloc(
|
|||
}
|
||||
if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
|
||||
ASSERT(O_pdqpp);
|
||||
if (ip->i_d.di_projid != prid) {
|
||||
if (ip->i_projid != prid) {
|
||||
xfs_iunlock(ip, lockflags);
|
||||
error = xfs_qm_dqget(mp, prid,
|
||||
XFS_DQTYPE_PROJ, true, &pq);
|
||||
|
@ -1779,11 +1777,11 @@ xfs_qm_vop_chown(
|
|||
ASSERT(prevdq);
|
||||
ASSERT(prevdq != newdq);
|
||||
|
||||
xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
|
||||
xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
|
||||
xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
|
||||
|
||||
/* the sparkling new dquot */
|
||||
xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
|
||||
xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
|
||||
xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
|
||||
|
||||
/*
|
||||
|
@ -1877,7 +1875,7 @@ xfs_qm_vop_create_dqattach(
|
|||
}
|
||||
if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
|
||||
ASSERT(ip->i_pdquot == NULL);
|
||||
ASSERT(ip->i_d.di_projid == pdqp->q_id);
|
||||
ASSERT(ip->i_projid == pdqp->q_id);
|
||||
|
||||
ip->i_pdquot = xfs_qm_dqhold(pdqp);
|
||||
xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
|
||||
|
|
|
@ -60,7 +60,7 @@ xfs_qm_statvfs(
|
|||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct xfs_dquot *dqp;
|
||||
|
||||
if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQTYPE_PROJ, false, &dqp)) {
|
||||
if (!xfs_qm_dqget(mp, ip->i_projid, XFS_DQTYPE_PROJ, false, &dqp)) {
|
||||
xfs_fill_statvfs_from_dquot(statp, dqp);
|
||||
xfs_qm_dqput(dqp);
|
||||
}
|
||||
|
|
|
@ -293,7 +293,7 @@ xfs_qm_scall_trunc_qfile(
|
|||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
xfs_trans_ijoin(tp, ip, 0);
|
||||
|
||||
ip->i_d.di_size = 0;
|
||||
ip->i_disk_size = 0;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
|
||||
error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
|
||||
|
|
|
@ -35,7 +35,7 @@ xfs_qm_fill_state(
|
|||
tempqip = true;
|
||||
}
|
||||
tstate->flags |= QCI_SYSFILE;
|
||||
tstate->blocks = ip->i_d.di_nblocks;
|
||||
tstate->blocks = ip->i_nblocks;
|
||||
tstate->nextents = ip->i_df.if_nextents;
|
||||
tstate->spc_timelimit = (u32)defq->blk.time;
|
||||
tstate->ino_timelimit = (u32)defq->ino.time;
|
||||
|
|
|
@ -874,7 +874,7 @@ xfs_reflink_set_inode_flag(
|
|||
if (!xfs_is_reflink_inode(src)) {
|
||||
trace_xfs_reflink_set_inode_flag(src);
|
||||
xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
|
||||
src->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
|
||||
src->i_diflags2 |= XFS_DIFLAG2_REFLINK;
|
||||
xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
|
||||
xfs_ifork_init_cow(src);
|
||||
} else
|
||||
|
@ -886,7 +886,7 @@ xfs_reflink_set_inode_flag(
|
|||
if (!xfs_is_reflink_inode(dest)) {
|
||||
trace_xfs_reflink_set_inode_flag(dest);
|
||||
xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
|
||||
dest->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
|
||||
dest->i_diflags2 |= XFS_DIFLAG2_REFLINK;
|
||||
xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
|
||||
xfs_ifork_init_cow(dest);
|
||||
} else
|
||||
|
@ -930,12 +930,12 @@ xfs_reflink_update_dest(
|
|||
if (newlen > i_size_read(VFS_I(dest))) {
|
||||
trace_xfs_reflink_update_inode_size(dest, newlen);
|
||||
i_size_write(VFS_I(dest), newlen);
|
||||
dest->i_d.di_size = newlen;
|
||||
dest->i_disk_size = newlen;
|
||||
}
|
||||
|
||||
if (cowextsize) {
|
||||
dest->i_d.di_cowextsize = cowextsize;
|
||||
dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
|
||||
dest->i_cowextsize = cowextsize;
|
||||
dest->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
|
||||
}
|
||||
|
||||
xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
|
||||
|
@ -1156,7 +1156,7 @@ xfs_reflink_remap_extent(
|
|||
if (newlen > i_size_read(VFS_I(ip))) {
|
||||
trace_xfs_reflink_update_inode_size(ip, newlen);
|
||||
i_size_write(VFS_I(ip), newlen);
|
||||
ip->i_d.di_size = newlen;
|
||||
ip->i_disk_size = newlen;
|
||||
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
|
||||
}
|
||||
|
||||
|
@ -1392,11 +1392,9 @@ xfs_reflink_inode_has_shared_extents(
|
|||
int error;
|
||||
|
||||
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
*has_shared = false;
|
||||
found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
|
||||
|
@ -1455,7 +1453,7 @@ xfs_reflink_clear_inode_flag(
|
|||
|
||||
/* Clear the inode flag. */
|
||||
trace_xfs_reflink_unset_inode_flag(ip);
|
||||
ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
|
||||
xfs_inode_clear_cowblocks_tag(ip);
|
||||
xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
|
||||
|
||||
|
|
|
@ -966,8 +966,8 @@ xfs_growfs_rt(
|
|||
* Get the old block counts for bitmap and summary inodes.
|
||||
* These can't change since other growfs callers are locked out.
|
||||
*/
|
||||
rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_d.di_size);
|
||||
rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_d.di_size);
|
||||
rbmblocks = XFS_B_TO_FSB(mp, mp->m_rbmip->i_disk_size);
|
||||
rsumblocks = XFS_B_TO_FSB(mp, mp->m_rsumip->i_disk_size);
|
||||
/*
|
||||
* Allocate space to the bitmap and summary files, as necessary.
|
||||
*/
|
||||
|
@ -1036,9 +1036,9 @@ xfs_growfs_rt(
|
|||
* to update the incore size so that inode inactivation won't
|
||||
* punch what it thinks are "posteof" blocks.
|
||||
*/
|
||||
mp->m_rbmip->i_d.di_size =
|
||||
mp->m_rbmip->i_disk_size =
|
||||
nsbp->sb_rbmblocks * nsbp->sb_blocksize;
|
||||
i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size);
|
||||
i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_disk_size);
|
||||
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
|
||||
/*
|
||||
* Get the summary inode into the transaction.
|
||||
|
@ -1050,8 +1050,8 @@ xfs_growfs_rt(
|
|||
* incore size so that inode inactivation won't punch what it
|
||||
* thinks are "posteof" blocks.
|
||||
*/
|
||||
mp->m_rsumip->i_d.di_size = nmp->m_rsumsize;
|
||||
i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size);
|
||||
mp->m_rsumip->i_disk_size = nmp->m_rsumsize;
|
||||
i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_disk_size);
|
||||
xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE);
|
||||
/*
|
||||
* Copy summary data from old to new sizes.
|
||||
|
@ -1318,8 +1318,8 @@ xfs_rtpick_extent(
|
|||
ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
|
||||
|
||||
seqp = (uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
|
||||
if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
|
||||
mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
|
||||
if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
|
||||
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
|
||||
*seqp = 0;
|
||||
}
|
||||
seq = *seqp;
|
||||
|
|
|
@ -519,10 +519,10 @@ xfs_init_mount_workqueues(
|
|||
if (!mp->m_reclaim_workqueue)
|
||||
goto out_destroy_cil;
|
||||
|
||||
mp->m_blockgc_workqueue = alloc_workqueue("xfs-blockgc/%s",
|
||||
mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
|
||||
WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
|
||||
0, mp->m_super->s_id);
|
||||
if (!mp->m_blockgc_workqueue)
|
||||
if (!mp->m_gc_workqueue)
|
||||
goto out_destroy_reclaim;
|
||||
|
||||
mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
|
||||
|
@ -533,7 +533,7 @@ xfs_init_mount_workqueues(
|
|||
return 0;
|
||||
|
||||
out_destroy_eofb:
|
||||
destroy_workqueue(mp->m_blockgc_workqueue);
|
||||
destroy_workqueue(mp->m_gc_workqueue);
|
||||
out_destroy_reclaim:
|
||||
destroy_workqueue(mp->m_reclaim_workqueue);
|
||||
out_destroy_cil:
|
||||
|
@ -551,7 +551,7 @@ xfs_destroy_mount_workqueues(
|
|||
struct xfs_mount *mp)
|
||||
{
|
||||
destroy_workqueue(mp->m_sync_workqueue);
|
||||
destroy_workqueue(mp->m_blockgc_workqueue);
|
||||
destroy_workqueue(mp->m_gc_workqueue);
|
||||
destroy_workqueue(mp->m_reclaim_workqueue);
|
||||
destroy_workqueue(mp->m_cil_workqueue);
|
||||
destroy_workqueue(mp->m_unwritten_workqueue);
|
||||
|
@ -834,13 +834,13 @@ xfs_fs_statfs(
|
|||
statp->f_ffree = max_t(int64_t, ffree, 0);
|
||||
|
||||
|
||||
if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
|
||||
if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
|
||||
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
|
||||
(XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
|
||||
xfs_qm_statvfs(ip, statp);
|
||||
|
||||
if (XFS_IS_REALTIME_MOUNT(mp) &&
|
||||
(ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
|
||||
(ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
|
||||
statp->f_blocks = sbp->sb_rblocks;
|
||||
statp->f_bavail = statp->f_bfree =
|
||||
sbp->sb_frextents * sbp->sb_rextsize;
|
||||
|
@@ -1126,6 +1126,22 @@ suffix_kstrtoint(
return ret;
}

+static inline void
+xfs_fs_warn_deprecated(
+struct fs_context *fc,
+struct fs_parameter *param,
+uint64_t flag,
+bool value)
+{
+/* Don't print the warning if reconfiguring and current mount point
+* already had the flag set
+*/
+if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
+!!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
+return;
+xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
+}
+
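xfs_fs_warn_deprecated() above only nags when a deprecated option would actually change the mount's state, so remounts that merely repeat the existing options stay quiet. The same idea in a minimal standalone form (invented names, not the kernel's fs_context API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_OPT_IKEEP	(1ULL << 0)

struct demo_mount {
	uint64_t flags;
};

/* Warn only if applying @value for @flag would change the current state. */
static void demo_warn_deprecated(const struct demo_mount *cur, bool reconfigure,
				 uint64_t flag, bool value, const char *name)
{
	if (reconfigure && !!(cur->flags & flag) == value)
		return;		/* remount repeats what is already set: stay quiet */
	fprintf(stderr, "%s mount option is deprecated.\n", name);
}

int main(void)
{
	struct demo_mount m = { .flags = DEMO_OPT_IKEEP };

	demo_warn_deprecated(&m, true, DEMO_OPT_IKEEP, true, "ikeep");	/* silent: no change */
	demo_warn_deprecated(&m, false, DEMO_OPT_IKEEP, true, "ikeep");	/* warns at first mount */
	return 0;
}
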
/*
|
||||
* Set mount state from a mount option.
|
||||
*
|
||||
|
@ -1136,7 +1152,7 @@ xfs_fs_parse_param(
|
|||
struct fs_context *fc,
|
||||
struct fs_parameter *param)
|
||||
{
|
||||
struct xfs_mount *mp = fc->s_fs_info;
|
||||
struct xfs_mount *parsing_mp = fc->s_fs_info;
|
||||
struct fs_parse_result result;
|
||||
int size = 0;
|
||||
int opt;
|
||||
|
@ -1147,142 +1163,142 @@ xfs_fs_parse_param(
|
|||
|
||||
switch (opt) {
|
||||
case Opt_logbufs:
|
||||
mp->m_logbufs = result.uint_32;
|
||||
parsing_mp->m_logbufs = result.uint_32;
|
||||
return 0;
|
||||
case Opt_logbsize:
|
||||
if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
|
||||
if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
case Opt_logdev:
|
||||
kfree(mp->m_logname);
|
||||
mp->m_logname = kstrdup(param->string, GFP_KERNEL);
|
||||
if (!mp->m_logname)
|
||||
kfree(parsing_mp->m_logname);
|
||||
parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
|
||||
if (!parsing_mp->m_logname)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
case Opt_rtdev:
|
||||
kfree(mp->m_rtname);
|
||||
mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
|
||||
if (!mp->m_rtname)
|
||||
kfree(parsing_mp->m_rtname);
|
||||
parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
|
||||
if (!parsing_mp->m_rtname)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
case Opt_allocsize:
|
||||
if (suffix_kstrtoint(param->string, 10, &size))
|
||||
return -EINVAL;
|
||||
mp->m_allocsize_log = ffs(size) - 1;
|
||||
mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
|
||||
parsing_mp->m_allocsize_log = ffs(size) - 1;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
|
||||
return 0;
|
||||
case Opt_grpid:
|
||||
case Opt_bsdgroups:
|
||||
mp->m_flags |= XFS_MOUNT_GRPID;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_GRPID;
|
||||
return 0;
|
||||
case Opt_nogrpid:
|
||||
case Opt_sysvgroups:
|
||||
mp->m_flags &= ~XFS_MOUNT_GRPID;
|
||||
parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
|
||||
return 0;
|
||||
case Opt_wsync:
|
||||
mp->m_flags |= XFS_MOUNT_WSYNC;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
|
||||
return 0;
|
||||
case Opt_norecovery:
|
||||
mp->m_flags |= XFS_MOUNT_NORECOVERY;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
|
||||
return 0;
|
||||
case Opt_noalign:
|
||||
mp->m_flags |= XFS_MOUNT_NOALIGN;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
|
||||
return 0;
|
||||
case Opt_swalloc:
|
||||
mp->m_flags |= XFS_MOUNT_SWALLOC;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
|
||||
return 0;
|
||||
case Opt_sunit:
|
||||
mp->m_dalign = result.uint_32;
|
||||
parsing_mp->m_dalign = result.uint_32;
|
||||
return 0;
|
||||
case Opt_swidth:
|
||||
mp->m_swidth = result.uint_32;
|
||||
parsing_mp->m_swidth = result.uint_32;
|
||||
return 0;
|
||||
case Opt_inode32:
|
||||
mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
|
||||
return 0;
|
||||
case Opt_inode64:
|
||||
mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
|
||||
parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
|
||||
return 0;
|
||||
case Opt_nouuid:
|
||||
mp->m_flags |= XFS_MOUNT_NOUUID;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
|
||||
return 0;
|
||||
case Opt_largeio:
|
||||
mp->m_flags |= XFS_MOUNT_LARGEIO;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
|
||||
return 0;
|
||||
case Opt_nolargeio:
|
||||
mp->m_flags &= ~XFS_MOUNT_LARGEIO;
|
||||
parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
|
||||
return 0;
|
||||
case Opt_filestreams:
|
||||
mp->m_flags |= XFS_MOUNT_FILESTREAMS;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
|
||||
return 0;
|
||||
case Opt_noquota:
|
||||
mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
|
||||
mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
|
||||
mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
|
||||
parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
|
||||
parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
|
||||
parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
|
||||
return 0;
|
||||
case Opt_quota:
|
||||
case Opt_uquota:
|
||||
case Opt_usrquota:
|
||||
mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
|
||||
parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
|
||||
XFS_UQUOTA_ENFD);
|
||||
return 0;
|
||||
case Opt_qnoenforce:
|
||||
case Opt_uqnoenforce:
|
||||
mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
|
||||
mp->m_qflags &= ~XFS_UQUOTA_ENFD;
|
||||
parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
|
||||
parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
|
||||
return 0;
|
||||
case Opt_pquota:
|
||||
case Opt_prjquota:
|
||||
mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
|
||||
parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
|
||||
XFS_PQUOTA_ENFD);
|
||||
return 0;
|
||||
case Opt_pqnoenforce:
|
||||
mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
|
||||
mp->m_qflags &= ~XFS_PQUOTA_ENFD;
|
||||
parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
|
||||
parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
|
||||
return 0;
|
||||
case Opt_gquota:
|
||||
case Opt_grpquota:
|
||||
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
|
||||
parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
|
||||
XFS_GQUOTA_ENFD);
|
||||
return 0;
|
||||
case Opt_gqnoenforce:
|
||||
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
|
||||
mp->m_qflags &= ~XFS_GQUOTA_ENFD;
|
||||
parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
|
||||
parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
|
||||
return 0;
|
||||
case Opt_discard:
|
||||
mp->m_flags |= XFS_MOUNT_DISCARD;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
|
||||
return 0;
|
||||
case Opt_nodiscard:
|
||||
mp->m_flags &= ~XFS_MOUNT_DISCARD;
|
||||
parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
|
||||
return 0;
|
||||
#ifdef CONFIG_FS_DAX
|
||||
case Opt_dax:
|
||||
xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
|
||||
xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
|
||||
return 0;
|
||||
case Opt_dax_enum:
|
||||
xfs_mount_set_dax_mode(mp, result.uint_32);
|
||||
xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
|
||||
return 0;
|
||||
#endif
|
||||
/* Following mount options will be removed in September 2025 */
|
||||
case Opt_ikeep:
|
||||
xfs_warn(mp, "%s mount option is deprecated.", param->key);
|
||||
mp->m_flags |= XFS_MOUNT_IKEEP;
|
||||
xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
|
||||
parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
|
||||
return 0;
|
||||
case Opt_noikeep:
|
||||
xfs_warn(mp, "%s mount option is deprecated.", param->key);
|
||||
mp->m_flags &= ~XFS_MOUNT_IKEEP;
|
||||
xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
|
||||
parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
|
||||
return 0;
|
||||
case Opt_attr2:
|
||||
xfs_warn(mp, "%s mount option is deprecated.", param->key);
|
||||
mp->m_flags |= XFS_MOUNT_ATTR2;
|
||||
xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
|
||||
parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
|
||||
return 0;
|
||||
case Opt_noattr2:
|
||||
xfs_warn(mp, "%s mount option is deprecated.", param->key);
|
||||
mp->m_flags &= ~XFS_MOUNT_ATTR2;
|
||||
mp->m_flags |= XFS_MOUNT_NOATTR2;
|
||||
xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
|
||||
parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
|
||||
parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
|
||||
return 0;
|
||||
default:
|
||||
xfs_warn(mp, "unknown mount option [%s].", param->key);
|
||||
xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1918,7 +1934,7 @@ xfs_init_zones(void)
|
|||
if (!xfs_ifork_zone)
|
||||
goto out_destroy_da_state_zone;
|
||||
|
||||
xfs_trans_zone = kmem_cache_create("xf_trans",
|
||||
xfs_trans_zone = kmem_cache_create("xfs_trans",
|
||||
sizeof(struct xfs_trans),
|
||||
0, 0, NULL);
|
||||
if (!xfs_trans_zone)
|
||||
|
|
|
@ -86,7 +86,6 @@ struct xfs_mount;
|
|||
struct xfs_buftarg;
|
||||
struct block_device;
|
||||
|
||||
extern void xfs_quiesce_attr(struct xfs_mount *mp);
|
||||
extern void xfs_flush_inodes(struct xfs_mount *mp);
|
||||
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
|
||||
extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
|
||||
|
|
|
@ -33,7 +33,7 @@ xfs_readlink_bmap_ilocked(
|
|||
struct xfs_buf *bp;
|
||||
xfs_daddr_t d;
|
||||
char *cur_chunk;
|
||||
int pathlen = ip->i_d.di_size;
|
||||
int pathlen = ip->i_disk_size;
|
||||
int nmaps = XFS_SYMLINK_MAPS;
|
||||
int byte_cnt;
|
||||
int n;
|
||||
|
@ -86,7 +86,7 @@ xfs_readlink_bmap_ilocked(
|
|||
}
|
||||
ASSERT(pathlen == 0);
|
||||
|
||||
link[ip->i_d.di_size] = '\0';
|
||||
link[ip->i_disk_size] = '\0';
|
||||
error = 0;
|
||||
|
||||
out:
|
||||
|
@ -104,14 +104,14 @@ xfs_readlink(
|
|||
|
||||
trace_xfs_readlink(ip);
|
||||
|
||||
ASSERT(!(ip->i_df.if_flags & XFS_IFINLINE));
|
||||
ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_LOCAL);
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -EIO;
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_SHARED);
|
||||
|
||||
pathlen = ip->i_d.di_size;
|
||||
pathlen = ip->i_disk_size;
|
||||
if (!pathlen)
|
||||
goto out;
|
||||
|
||||
|
@ -210,7 +210,7 @@ xfs_symlink(
|
|||
/*
|
||||
* Check whether the directory allows new symlinks or not.
|
||||
*/
|
||||
if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
|
||||
if (dp->i_diflags & XFS_DIFLAG_NOSYMLINKS) {
|
||||
error = -EPERM;
|
||||
goto out_trans_cancel;
|
||||
}
|
||||
|
@ -224,7 +224,7 @@ xfs_symlink(
|
|||
* Allocate an inode for the symlink.
|
||||
*/
|
||||
error = xfs_dir_ialloc(mnt_userns, &tp, dp, S_IFLNK | (mode & ~S_IFMT),
|
||||
1, 0, prid, &ip);
|
||||
1, 0, prid, false, &ip);
|
||||
if (error)
|
||||
goto out_trans_cancel;
|
||||
|
||||
|
@@ -250,7 +250,7 @@ xfs_symlink(
 	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
 		xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);
 
-		ip->i_d.di_size = pathlen;
+		ip->i_disk_size = pathlen;
 		ip->i_df.if_format = XFS_DINODE_FMT_LOCAL;
 		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
 	} else {
@@ -265,7 +265,7 @@ xfs_symlink(
 			goto out_trans_cancel;
 
 		resblks -= fs_blocks;
-		ip->i_d.di_size = pathlen;
+		ip->i_disk_size = pathlen;
 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
 		cur_chunk = target_path;
@@ -300,7 +300,7 @@ xfs_symlink(
 		}
 		ASSERT(pathlen == 0);
 	}
-	i_size_write(VFS_I(ip), ip->i_d.di_size);
+	i_size_write(VFS_I(ip), ip->i_disk_size);
 
 	/*
 	 * Create the directory entry for the symlink.
@@ -377,7 +377,7 @@ xfs_inactive_symlink_rmt(
 	xfs_trans_t *tp;
 
 	mp = ip->i_mount;
-	ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
+	ASSERT(!xfs_need_iread_extents(&ip->i_df));
 	/*
 	 * We're freeing a symlink that has some
 	 * blocks allocated to it. Free the
@@ -400,8 +400,8 @@ xfs_inactive_symlink_rmt(
 	 * locked for the second transaction. In the error paths we need it
 	 * held so the cancel won't rele it, see below.
 	 */
-	size = (int)ip->i_d.di_size;
-	ip->i_d.di_size = 0;
+	size = (int)ip->i_disk_size;
+	ip->i_disk_size = 0;
 	VFS_I(ip)->i_mode = (VFS_I(ip)->i_mode & ~S_IFMT) | S_IFREG;
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 	/*
@@ -477,7 +477,7 @@ xfs_inactive_symlink(
 		return -EIO;
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	pathlen = (int)ip->i_d.di_size;
+	pathlen = (int)ip->i_disk_size;
 	ASSERT(pathlen);
 
 	if (pathlen <= 0 || pathlen > XFS_SYMLINK_MAXLEN) {
@@ -492,7 +492,7 @@ xfs_inactive_symlink(
 	 * Inline fork state gets removed by xfs_difree() so we have nothing to
 	 * do here in that case.
 	 */
-	if (ip->i_df.if_flags & XFS_IFINLINE) {
+	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		return 0;
 	}
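With XFS_IFINLINE and XFS_IFEXTENTS gone from if_flags (see the "remove XFS_IFEXTENTS" and "remove XFS_IFINLINE" commits in the shortlog), "is this fork in local format?" is answered from if_format directly, and "do the extents still need reading?" is wrapped up in xfs_need_iread_extents(). A sketch of what that predicate boils down to; the if_height test for "incore extent tree not populated yet" is an assumption about the incore layout, not a quote of the helper:

	static inline bool
	sketch_need_iread_extents(
		struct xfs_ifork	*ifp)
	{
		/* On-disk fork is a btree but the incore extent list is still empty. */
		return ifp->if_format == XFS_DINODE_FMT_BTREE && ifp->if_height == 0;
	}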
@@ -1298,7 +1298,7 @@ DECLARE_EVENT_CLASS(xfs_file_class,
 	TP_fast_assign(
 		__entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
 		__entry->ino = XFS_I(file_inode(iocb->ki_filp))->i_ino;
-		__entry->size = XFS_I(file_inode(iocb->ki_filp))->i_d.di_size;
+		__entry->size = XFS_I(file_inode(iocb->ki_filp))->i_disk_size;
 		__entry->offset = iocb->ki_pos;
 		__entry->count = iov_iter_count(iter);
 	),
@@ -1341,7 +1341,7 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
-		__entry->size = ip->i_d.di_size;
+		__entry->size = ip->i_disk_size;
 		__entry->offset = offset;
 		__entry->count = count;
 		__entry->whichfork = whichfork;
@@ -1387,7 +1387,7 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
 		__entry->isize = VFS_I(ip)->i_size;
-		__entry->disize = ip->i_d.di_size;
+		__entry->disize = ip->i_disk_size;
 		__entry->offset = offset;
 		__entry->count = count;
 	),
@@ -1425,7 +1425,7 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class,
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
-		__entry->size = ip->i_d.di_size;
+		__entry->size = ip->i_disk_size;
 		__entry->new_size = new_size;
 	),
 	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
@@ -1455,7 +1455,7 @@ TRACE_EVENT(xfs_pagecache_inval,
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
-		__entry->size = ip->i_d.di_size;
+		__entry->size = ip->i_disk_size;
 		__entry->start = start;
 		__entry->finish = finish;
 	),
@@ -1483,7 +1483,7 @@ TRACE_EVENT(xfs_bunmap,
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
 		__entry->ino = ip->i_ino;
-		__entry->size = ip->i_d.di_size;
+		__entry->size = ip->i_disk_size;
 		__entry->bno = bno;
 		__entry->len = len;
 		__entry->caller_ip = caller_ip;
@@ -3145,12 +3145,12 @@ DECLARE_EVENT_CLASS(xfs_double_io_class,
 		__entry->dev = VFS_I(src)->i_sb->s_dev;
 		__entry->src_ino = src->i_ino;
 		__entry->src_isize = VFS_I(src)->i_size;
-		__entry->src_disize = src->i_d.di_size;
+		__entry->src_disize = src->i_disk_size;
 		__entry->src_offset = soffset;
 		__entry->len = len;
 		__entry->dest_ino = dest->i_ino;
 		__entry->dest_isize = VFS_I(dest)->i_size;
-		__entry->dest_disize = dest->i_d.di_size;
+		__entry->dest_disize = dest->i_disk_size;
 		__entry->dest_offset = doffset;
 	),
 	TP_printk("dev %d:%d count %zd "
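Every hunk in this block is the mechanical fallout of folding struct xfs_icdinode into struct xfs_inode: fields that used to be reached through ip->i_d.di_* become first-class ip->i_* members. The renames visible in this diff map as follows:

	/*
	 *	old (via struct xfs_icdinode)	new (struct xfs_inode member)
	 *	ip->i_d.di_size			ip->i_disk_size
	 *	ip->i_d.di_nblocks		ip->i_nblocks
	 *	ip->i_d.di_flags		ip->i_diflags
	 */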
@@ -436,7 +436,6 @@ xfs_trans_mod_sb(
 		tp->t_res_frextents_delta += delta;
 		break;
 	case XFS_TRANS_SB_DBLOCKS:
-		ASSERT(delta > 0);
 		tp->t_dblocks_delta += delta;
 		break;
 	case XFS_TRANS_SB_AGCOUNT:
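Dropping ASSERT(delta > 0) for XFS_TRANS_SB_DBLOCKS is part of the "begin to handle the permanent loss of the very end of a filesystem" work: once the last AG can shrink, the logged dblocks delta may legitimately be negative. A hedged sketch of how a shrink-style caller would use the now-signed delta; the function and its argument are hypothetical, only xfs_trans_mod_sb() and XFS_TRANS_SB_DBLOCKS come from this hunk:

	static void
	sketch_shrink_log_sb(
		struct xfs_trans	*tp,
		xfs_extlen_t		delta_blocks)
	{
		/* Removing space from the end of the fs: log a negative delta. */
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, -(int64_t)delta_blocks);
	}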
@@ -618,19 +617,12 @@ xfs_trans_unreserve_and_mod_sb(
 		ASSERT(!error);
 	}
 
-	if (idelta) {
+	if (idelta)
 		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);
-		if (idelta < 0)
-			ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
-							XFS_ICOUNT_BATCH) >= 0);
-	}
 
-	if (ifreedelta) {
+	if (ifreedelta)
 		percpu_counter_add(&mp->m_ifree, ifreedelta);
-		if (ifreedelta < 0)
-			ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
-	}
 
 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
 		return;
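The deleted ASSERTs were debug-only checks that the incore inode counters never go negative, but percpu_counter_compare() and friends fall back to a full per-CPU summation whenever the approximate count is close to the comparison point, which is too expensive to pay on every transaction commit of a debug kernel. Roughly how that fallback works, paraphrased (not copied) from lib/percpu_counter.c:

	static int
	sketch_percpu_compare(
		struct percpu_counter	*fbc,
		s64			rhs,
		s32			batch)
	{
		s64	count = percpu_counter_read(fbc);	/* cheap, approximate */

		/* Far enough away that per-CPU error cannot change the answer. */
		if (abs(count - rhs) > (s64)batch * num_online_cpus())
			return count < rhs ? -1 : count > rhs;

		count = percpu_counter_sum(fbc);		/* walks every CPU */
		return count < rhs ? -1 : count > rhs;
	}

With the asserts gone, the braces around the now single-statement if bodies go too, which is the rest of the hunk.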
@@ -1197,7 +1189,7 @@ xfs_trans_alloc_ichange(
 	 * though that part is only semi-transactional.
 	 */
 	error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
-			pdqp, ip->i_d.di_nblocks + ip->i_delayed_blks,
+			pdqp, ip->i_nblocks + ip->i_delayed_blks,
			1, qflags);
 	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 		xfs_trans_cancel(tp);
@@ -9,6 +9,8 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_da_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
 #include "xfs_inode.h"
 #include "xfs_attr.h"
 #include "xfs_acl.h"