Merge tag 'xfs-4.13-merge-5' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull XFS updates from Darrick Wong:
 "Here are some changes for you for 4.13. For the most part it's fixes
  for bugs and deadlock problems, and preparation for online fsck in
  some future merge window.

   - Avoid quotacheck deadlocks
   - Fix transaction overflows when bunmapping fragmented files
   - Refactor directory readahead
   - Allow admin to configure if ASSERT is fatal
   - Improve transaction usage detail logging during overflows
   - Minor cleanups
   - Don't leak log items when the log shuts down
   - Remove double-underscore typedefs
   - Various preparation for online scrubbing
   - Introduce new error injection configuration sysfs knobs
   - Refactor dq_get_next to use extent map directly
   - Fix problems with iterating the page cache for unwritten data
   - Implement SEEK_{HOLE,DATA} via iomap
   - Refactor XFS to use iomap SEEK_HOLE and SEEK_DATA
   - Don't use MAXPATHLEN to check on-disk symlink target lengths"

* tag 'xfs-4.13-merge-5' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (48 commits)
  xfs: don't crash on unexpected holes in dir/attr btrees
  xfs: rename MAXPATHLEN to XFS_SYMLINK_MAXLEN
  xfs: fix contiguous dquot chunk iteration livelock
  xfs: Switch to iomap for SEEK_HOLE / SEEK_DATA
  vfs: Add iomap_seek_hole and iomap_seek_data helpers
  vfs: Add page_cache_seek_hole_data helper
  xfs: remove a whitespace-only line from xfs_fs_get_nextdqblk
  xfs: rewrite xfs_dq_get_next_id using xfs_iext_lookup_extent
  xfs: Check for m_errortag initialization in xfs_errortag_test
  xfs: grab dquots without taking the ilock
  xfs: fix semicolon.cocci warnings
  xfs: Don't clear SGID when inheriting ACLs
  xfs: free cowblocks and retry on buffered write ENOSPC
  xfs: replace log_badcrc_factor knob with error injection tag
  xfs: convert drop_writes to use the errortag mechanism
  xfs: remove unneeded parameter from XFS_TEST_ERROR
  xfs: expose errortag knobs via sysfs
  xfs: make errortag a per-mountpoint structure
  xfs: free uncommitted transactions during log recovery
  xfs: don't allow bmap on rt files
  ...
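For reference, the SEEK_{HOLE,DATA} items above are exercised from user space
through plain lseek(2). The following is a minimal userspace sketch, not part
of this series, that walks a file's data runs the way an application would hit
the new iomap/page-cache seek path; error handling is intentionally sparse.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;
	off_t end, pos = 0;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	end = lseek(fd, 0, SEEK_END);

	while (pos < end) {
		/* next offset containing data at or after pos */
		off_t data = lseek(fd, pos, SEEK_DATA);
		off_t hole;

		if (data < 0)		/* only a trailing hole remains */
			break;
		/* every file has a virtual hole at EOF, so this terminates */
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		pos = hole;
	}
	close(fd);
	return 0;
}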
commit 642338ba33

 fs/buffer.c | 124
@@ -3501,6 +3501,130 @@ int bh_submit_read(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(bh_submit_read);
 
+/*
+ * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
+ *
+ * Returns the offset within the file on success, and -ENOENT otherwise.
+ */
+static loff_t
+page_seek_hole_data(struct page *page, loff_t lastoff, int whence)
+{
+	loff_t offset = page_offset(page);
+	struct buffer_head *bh, *head;
+	bool seek_data = whence == SEEK_DATA;
+
+	if (lastoff < offset)
+		lastoff = offset;
+
+	bh = head = page_buffers(page);
+	do {
+		offset += bh->b_size;
+		if (lastoff >= offset)
+			continue;
+
+		/*
+		 * Unwritten extents that have data in the page cache covering
+		 * them can be identified by the BH_Unwritten state flag.
+		 * Pages with multiple buffers might have a mix of holes, data
+		 * and unwritten extents - any buffer with valid data in it
+		 * should have BH_Uptodate flag set on it.
+		 */
+
+		if ((buffer_unwritten(bh) || buffer_uptodate(bh)) == seek_data)
+			return lastoff;
+
+		lastoff = offset;
+	} while ((bh = bh->b_this_page) != head);
+	return -ENOENT;
+}
+
+/*
+ * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
+ *
+ * Within unwritten extents, the page cache determines which parts are holes
+ * and which are data: unwritten and uptodate buffer heads count as data;
+ * everything else counts as a hole.
+ *
+ * Returns the resulting offset on successs, and -ENOENT otherwise.
+ */
+loff_t
+page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
+			  int whence)
+{
+	pgoff_t index = offset >> PAGE_SHIFT;
+	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+	loff_t lastoff = offset;
+	struct pagevec pvec;
+
+	if (length <= 0)
+		return -ENOENT;
+
+	pagevec_init(&pvec, 0);
+
+	do {
+		unsigned want, nr_pages, i;
+
+		want = min_t(unsigned, end - index, PAGEVEC_SIZE);
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, want);
+		if (nr_pages == 0)
+			break;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/*
+			 * At this point, the page may be truncated or
+			 * invalidated (changing page->mapping to NULL), or
+			 * even swizzled back from swapper_space to tmpfs file
+			 * mapping.  However, page->index will not change
+			 * because we have a reference on the page.
+			 *
+			 * If current page offset is beyond where we've ended,
+			 * we've found a hole.
+			 */
+			if (whence == SEEK_HOLE &&
+			    lastoff < page_offset(page))
+				goto check_range;
+
+			/* Searching done if the page index is out of range. */
+			if (page->index >= end)
+				goto not_found;
+
+			lock_page(page);
+			if (likely(page->mapping == inode->i_mapping) &&
+			    page_has_buffers(page)) {
+				lastoff = page_seek_hole_data(page, lastoff, whence);
+				if (lastoff >= 0) {
+					unlock_page(page);
+					goto check_range;
+				}
+			}
+			unlock_page(page);
+			lastoff = page_offset(page) + PAGE_SIZE;
+		}
+
+		/* Searching done if fewer pages returned than wanted. */
+		if (nr_pages < want)
+			break;
+
+		index = pvec.pages[i - 1]->index + 1;
+		pagevec_release(&pvec);
+	} while (index < end);
+
+	/* When no page at lastoff and we are not done, we found a hole. */
+	if (whence != SEEK_HOLE)
+		goto not_found;
+
+check_range:
+	if (lastoff < offset + length)
+		goto out;
+not_found:
+	lastoff = -ENOENT;
+out:
+	pagevec_release(&pvec);
+	return lastoff;
+}
+
 void __init buffer_init(void)
 {
 	unsigned long nrpages;
 fs/iomap.c | 94
@@ -584,6 +584,100 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
 }
 EXPORT_SYMBOL_GPL(iomap_fiemap);
 
+static loff_t
+iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
+		      void *data, struct iomap *iomap)
+{
+	switch (iomap->type) {
+	case IOMAP_UNWRITTEN:
+		offset = page_cache_seek_hole_data(inode, offset, length,
+						   SEEK_HOLE);
+		if (offset < 0)
+			return length;
+		/* fall through */
+	case IOMAP_HOLE:
+		*(loff_t *)data = offset;
+		return 0;
+	default:
+		return length;
+	}
+}
+
+loff_t
+iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+{
+	loff_t size = i_size_read(inode);
+	loff_t length = size - offset;
+	loff_t ret;
+
+	/* Nothing to be found beyond the end of the file. */
+	if (offset >= size)
+		return -ENXIO;
+
+	while (length > 0) {
+		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+				  &offset, iomap_seek_hole_actor);
+		if (ret < 0)
+			return ret;
+		if (ret == 0)
+			break;
+
+		offset += ret;
+		length -= ret;
+	}
+
+	return offset;
+}
+EXPORT_SYMBOL_GPL(iomap_seek_hole);
+
+static loff_t
+iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
+		      void *data, struct iomap *iomap)
+{
+	switch (iomap->type) {
+	case IOMAP_HOLE:
+		return length;
+	case IOMAP_UNWRITTEN:
+		offset = page_cache_seek_hole_data(inode, offset, length,
+						   SEEK_DATA);
+		if (offset < 0)
+			return length;
+		/*FALLTHRU*/
+	default:
+		*(loff_t *)data = offset;
+		return 0;
+	}
+}
+
+loff_t
+iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+{
+	loff_t size = i_size_read(inode);
+	loff_t length = size - offset;
+	loff_t ret;
+
+	/* Nothing to be found beyond the end of the file. */
+	if (offset >= size)
+		return -ENXIO;
+
+	while (length > 0) {
+		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+				  &offset, iomap_seek_data_actor);
+		if (ret < 0)
+			return ret;
+		if (ret == 0)
+			break;
+
+		offset += ret;
+		length -= ret;
+	}
+
+	if (length <= 0)
+		return -ENXIO;
+	return offset;
+}
+EXPORT_SYMBOL_GPL(iomap_seek_data);
+
 /*
  * Private flags for iomap_dio, must not overlap with the public ones in
  * iomap.h:
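iomap_seek_hole() and iomap_seek_data() are intended to back a filesystem's
->llseek method; the "xfs: Switch to iomap for SEEK_HOLE / SEEK_DATA" commit
in this series does exactly that. Below is a hedged sketch of such a caller,
where example_file_llseek and example_iomap_ops are placeholder names rather
than symbols from this series; any locking around the seek (XFS takes its
iolock) is left to the filesystem.

#include <linux/fs.h>
#include <linux/iomap.h>

/* Placeholder: a real filesystem would pass its own iomap_ops here. */
static const struct iomap_ops example_iomap_ops;

static loff_t example_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t ret;

	switch (whence) {
	case SEEK_HOLE:
		ret = iomap_seek_hole(inode, offset, &example_iomap_ops);
		break;
	case SEEK_DATA:
		ret = iomap_seek_data(inode, offset, &example_iomap_ops);
		break;
	default:
		/* SEEK_SET/CUR/END are not affected by this series. */
		return generic_file_llseek(file, offset, whence);
	}

	if (ret < 0)
		return ret;
	return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
}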
 fs/xfs/Kconfig | 13

@@ -96,3 +96,16 @@ config XFS_DEBUG
 	  not useful unless you are debugging a particular problem.
 
 	  Say N unless you are an XFS developer, or you play one on TV.
+
+config XFS_ASSERT_FATAL
+	bool "XFS fatal asserts"
+	default y
+	depends on XFS_FS && XFS_DEBUG
+	help
+	  Set the default DEBUG mode ASSERT failure behavior.
+
+	  Say Y here to cause DEBUG mode ASSERT failures to result in fatal
+	  errors that BUG() the kernel by default. If you say N, ASSERT failures
+	  result in warnings.
+
+	  This behavior can be modified at runtime via sysfs.
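The help text's note about runtime control refers to an XFS sysfs knob. A
small user-space sketch of flipping it follows; the attribute path is an
assumption based on the debug knobs this series adds and should be checked
against the running kernel.

#include <stdio.h>

static int set_bug_on_assert(int enable)
{
	/* assumed path for the runtime ASSERT-failure knob */
	FILE *f = fopen("/sys/fs/xfs/debug/bug_on_assert", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	/* 0: ASSERT failures warn; 1: ASSERT failures BUG() the kernel */
	return set_bug_on_assert(0) ? 1 : 0;
}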
 fs/xfs/libxfs/xfs_ag_resv.c

@@ -111,8 +111,7 @@ xfs_ag_resv_critical(
 
 	/* Critically low if less than 10% or max btree height remains. */
 	return XFS_TEST_ERROR(avail < orig / 10 || avail < XFS_BTREE_MAXLEVELS,
-			pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL,
-			XFS_RANDOM_AG_RESV_CRITICAL);
+			pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL);
 }
 
 /*
@ -606,7 +606,7 @@ const struct xfs_buf_ops xfs_agfl_buf_ops = {
|
|||
/*
|
||||
* Read in the allocation group free block array.
|
||||
*/
|
||||
STATIC int /* error */
|
||||
int /* error */
|
||||
xfs_alloc_read_agfl(
|
||||
xfs_mount_t *mp, /* mount point structure */
|
||||
xfs_trans_t *tp, /* transaction pointer */
|
||||
|
@ -2454,8 +2454,7 @@ xfs_agf_read_verify(
|
|||
!xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
|
||||
xfs_buf_ioerror(bp, -EFSBADCRC);
|
||||
else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
|
||||
XFS_ERRTAG_ALLOC_READ_AGF,
|
||||
XFS_RANDOM_ALLOC_READ_AGF))
|
||||
XFS_ERRTAG_ALLOC_READ_AGF))
|
||||
xfs_buf_ioerror(bp, -EFSCORRUPTED);
|
||||
|
||||
if (bp->b_error)
|
||||
|
@ -2842,8 +2841,7 @@ xfs_free_extent(
|
|||
ASSERT(type != XFS_AG_RESV_AGFL);
|
||||
|
||||
if (XFS_TEST_ERROR(false, mp,
|
||||
XFS_ERRTAG_FREE_EXTENT,
|
||||
XFS_RANDOM_FREE_EXTENT))
|
||||
XFS_ERRTAG_FREE_EXTENT))
|
||||
return -EIO;
|
||||
|
||||
error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
|
||||
|
|
|
@ -213,6 +213,8 @@ xfs_alloc_get_rec(
|
|||
|
||||
int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
|
||||
xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
|
||||
int xfs_alloc_read_agfl(struct xfs_mount *mp, struct xfs_trans *tp,
|
||||
xfs_agnumber_t agno, struct xfs_buf **bpp);
|
||||
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
|
||||
int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
|
||||
struct xfs_buf **agbp);
|
||||
|
|
|
@ -253,7 +253,7 @@ xfs_allocbt_init_ptr_from_cur(
|
|||
ptr->s = agf->agf_roots[cur->bc_btnum];
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_bnobt_key_diff(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key)
|
||||
|
@ -261,42 +261,42 @@ xfs_bnobt_key_diff(
|
|||
xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
|
||||
xfs_alloc_key_t *kp = &key->alloc;
|
||||
|
||||
return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
|
||||
return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_cntbt_key_diff(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key)
|
||||
{
|
||||
xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
|
||||
xfs_alloc_key_t *kp = &key->alloc;
|
||||
__int64_t diff;
|
||||
int64_t diff;
|
||||
|
||||
diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
|
||||
diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
|
||||
if (diff)
|
||||
return diff;
|
||||
|
||||
return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
|
||||
return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_bnobt_diff_two_keys(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *k1,
|
||||
union xfs_btree_key *k2)
|
||||
{
|
||||
return (__int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
|
||||
return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
|
||||
be32_to_cpu(k2->alloc.ar_startblock);
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_cntbt_diff_two_keys(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *k1,
|
||||
union xfs_btree_key *k2)
|
||||
{
|
||||
__int64_t diff;
|
||||
int64_t diff;
|
||||
|
||||
diff = be32_to_cpu(k1->alloc.ar_blockcount) -
|
||||
be32_to_cpu(k2->alloc.ar_blockcount);
|
||||
|
@ -395,7 +395,6 @@ const struct xfs_buf_ops xfs_allocbt_buf_ops = {
|
|||
};
|
||||
|
||||
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
STATIC int
|
||||
xfs_bnobt_keys_inorder(
|
||||
struct xfs_btree_cur *cur,
|
||||
|
@ -442,7 +441,6 @@ xfs_cntbt_recs_inorder(
|
|||
be32_to_cpu(r1->alloc.ar_startblock) <
|
||||
be32_to_cpu(r2->alloc.ar_startblock));
|
||||
}
|
||||
#endif /* DEBUG */
|
||||
|
||||
static const struct xfs_btree_ops xfs_bnobt_ops = {
|
||||
.rec_len = sizeof(xfs_alloc_rec_t),
|
||||
|
@ -462,10 +460,8 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
|
|||
.key_diff = xfs_bnobt_key_diff,
|
||||
.buf_ops = &xfs_allocbt_buf_ops,
|
||||
.diff_two_keys = xfs_bnobt_diff_two_keys,
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
.keys_inorder = xfs_bnobt_keys_inorder,
|
||||
.recs_inorder = xfs_bnobt_recs_inorder,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct xfs_btree_ops xfs_cntbt_ops = {
|
||||
|
@ -486,10 +482,8 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
|
|||
.key_diff = xfs_cntbt_key_diff,
|
||||
.buf_ops = &xfs_allocbt_buf_ops,
|
||||
.diff_two_keys = xfs_cntbt_diff_two_keys,
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
.keys_inorder = xfs_cntbt_keys_inorder,
|
||||
.recs_inorder = xfs_cntbt_recs_inorder,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -114,6 +114,23 @@ xfs_inode_hasattr(
|
|||
* Overall external interface routines.
|
||||
*========================================================================*/
|
||||
|
||||
/* Retrieve an extended attribute and its value. Must have iolock. */
|
||||
int
|
||||
xfs_attr_get_ilocked(
|
||||
struct xfs_inode *ip,
|
||||
struct xfs_da_args *args)
|
||||
{
|
||||
if (!xfs_inode_hasattr(ip))
|
||||
return -ENOATTR;
|
||||
else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
|
||||
return xfs_attr_shortform_getvalue(args);
|
||||
else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
|
||||
return xfs_attr_leaf_get(args);
|
||||
else
|
||||
return xfs_attr_node_get(args);
|
||||
}
|
||||
|
||||
/* Retrieve an extended attribute by name, and its value. */
|
||||
int
|
||||
xfs_attr_get(
|
||||
struct xfs_inode *ip,
|
||||
|
@ -141,14 +158,7 @@ xfs_attr_get(
|
|||
args.op_flags = XFS_DA_OP_OKNOENT;
|
||||
|
||||
lock_mode = xfs_ilock_attr_map_shared(ip);
|
||||
if (!xfs_inode_hasattr(ip))
|
||||
error = -ENOATTR;
|
||||
else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
|
||||
error = xfs_attr_shortform_getvalue(&args);
|
||||
else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
|
||||
error = xfs_attr_leaf_get(&args);
|
||||
else
|
||||
error = xfs_attr_node_get(&args);
|
||||
error = xfs_attr_get_ilocked(ip, &args);
|
||||
xfs_iunlock(ip, lock_mode);
|
||||
|
||||
*valuelenp = args.valuelen;
|
||||
|
|
|
@ -351,7 +351,7 @@ xfs_attr3_leaf_read(
|
|||
|
||||
err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
|
||||
XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
|
||||
if (!err && tp)
|
||||
if (!err && tp && *bpp)
|
||||
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -253,7 +253,7 @@ xfs_attr_rmtval_copyout(
|
|||
xfs_ino_t ino,
|
||||
int *offset,
|
||||
int *valuelen,
|
||||
__uint8_t **dst)
|
||||
uint8_t **dst)
|
||||
{
|
||||
char *src = bp->b_addr;
|
||||
xfs_daddr_t bno = bp->b_bn;
|
||||
|
@ -301,7 +301,7 @@ xfs_attr_rmtval_copyin(
|
|||
xfs_ino_t ino,
|
||||
int *offset,
|
||||
int *valuelen,
|
||||
__uint8_t **src)
|
||||
uint8_t **src)
|
||||
{
|
||||
char *dst = bp->b_addr;
|
||||
xfs_daddr_t bno = bp->b_bn;
|
||||
|
@ -355,7 +355,7 @@ xfs_attr_rmtval_get(
|
|||
struct xfs_mount *mp = args->dp->i_mount;
|
||||
struct xfs_buf *bp;
|
||||
xfs_dablk_t lblkno = args->rmtblkno;
|
||||
__uint8_t *dst = args->value;
|
||||
uint8_t *dst = args->value;
|
||||
int valuelen;
|
||||
int nmap;
|
||||
int error;
|
||||
|
@ -386,7 +386,8 @@ xfs_attr_rmtval_get(
|
|||
(map[i].br_startblock != HOLESTARTBLOCK));
|
||||
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
|
||||
dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
|
||||
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
|
||||
error = xfs_trans_read_buf(mp, args->trans,
|
||||
mp->m_ddev_targp,
|
||||
dblkno, dblkcnt, 0, &bp,
|
||||
&xfs_attr3_rmt_buf_ops);
|
||||
if (error)
|
||||
|
@ -395,7 +396,7 @@ xfs_attr_rmtval_get(
|
|||
error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
|
||||
&offset, &valuelen,
|
||||
&dst);
|
||||
xfs_buf_relse(bp);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -421,7 +422,7 @@ xfs_attr_rmtval_set(
|
|||
struct xfs_bmbt_irec map;
|
||||
xfs_dablk_t lblkno;
|
||||
xfs_fileoff_t lfileoff = 0;
|
||||
__uint8_t *src = args->value;
|
||||
uint8_t *src = args->value;
|
||||
int blkcnt;
|
||||
int valuelen;
|
||||
int nmap;
|
||||
|
|
|
@ -31,10 +31,10 @@ typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t;
|
|||
* We generate this then sort it, attr_list() must return things in hash-order.
|
||||
*/
|
||||
typedef struct xfs_attr_sf_sort {
|
||||
__uint8_t entno; /* entry number in original list */
|
||||
__uint8_t namelen; /* length of name value (no null) */
|
||||
__uint8_t valuelen; /* length of value */
|
||||
__uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
|
||||
uint8_t entno; /* entry number in original list */
|
||||
uint8_t namelen; /* length of name value (no null) */
|
||||
uint8_t valuelen; /* length of value */
|
||||
uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
|
||||
xfs_dahash_t hash; /* this entry's hash value */
|
||||
unsigned char *name; /* name value, pointer into buffer */
|
||||
} xfs_attr_sf_sort_t;
|
||||
|
@ -42,7 +42,7 @@ typedef struct xfs_attr_sf_sort {
|
|||
#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \
|
||||
(((int)sizeof(xfs_attr_sf_entry_t)-1 + (nlen)+(vlen)))
|
||||
#define XFS_ATTR_SF_ENTSIZE_MAX /* max space for name&value */ \
|
||||
((1 << (NBBY*(int)sizeof(__uint8_t))) - 1)
|
||||
((1 << (NBBY*(int)sizeof(uint8_t))) - 1)
|
||||
#define XFS_ATTR_SF_ENTSIZE(sfep) /* space an entry uses */ \
|
||||
((int)sizeof(xfs_attr_sf_entry_t)-1 + (sfep)->namelen+(sfep)->valuelen)
|
||||
#define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \
|
||||
|
|
|
@ -25,47 +25,47 @@
|
|||
/*
|
||||
* masks with n high/low bits set, 64-bit values
|
||||
*/
|
||||
static inline __uint64_t xfs_mask64hi(int n)
|
||||
static inline uint64_t xfs_mask64hi(int n)
|
||||
{
|
||||
return (__uint64_t)-1 << (64 - (n));
|
||||
return (uint64_t)-1 << (64 - (n));
|
||||
}
|
||||
static inline __uint32_t xfs_mask32lo(int n)
|
||||
static inline uint32_t xfs_mask32lo(int n)
|
||||
{
|
||||
return ((__uint32_t)1 << (n)) - 1;
|
||||
return ((uint32_t)1 << (n)) - 1;
|
||||
}
|
||||
static inline __uint64_t xfs_mask64lo(int n)
|
||||
static inline uint64_t xfs_mask64lo(int n)
|
||||
{
|
||||
return ((__uint64_t)1 << (n)) - 1;
|
||||
return ((uint64_t)1 << (n)) - 1;
|
||||
}
|
||||
|
||||
/* Get high bit set out of 32-bit argument, -1 if none set */
|
||||
static inline int xfs_highbit32(__uint32_t v)
|
||||
static inline int xfs_highbit32(uint32_t v)
|
||||
{
|
||||
return fls(v) - 1;
|
||||
}
|
||||
|
||||
/* Get high bit set out of 64-bit argument, -1 if none set */
|
||||
static inline int xfs_highbit64(__uint64_t v)
|
||||
static inline int xfs_highbit64(uint64_t v)
|
||||
{
|
||||
return fls64(v) - 1;
|
||||
}
|
||||
|
||||
/* Get low bit set out of 32-bit argument, -1 if none set */
|
||||
static inline int xfs_lowbit32(__uint32_t v)
|
||||
static inline int xfs_lowbit32(uint32_t v)
|
||||
{
|
||||
return ffs(v) - 1;
|
||||
}
|
||||
|
||||
/* Get low bit set out of 64-bit argument, -1 if none set */
|
||||
static inline int xfs_lowbit64(__uint64_t v)
|
||||
static inline int xfs_lowbit64(uint64_t v)
|
||||
{
|
||||
__uint32_t w = (__uint32_t)v;
|
||||
uint32_t w = (uint32_t)v;
|
||||
int n = 0;
|
||||
|
||||
if (w) { /* lower bits */
|
||||
n = ffs(w);
|
||||
} else { /* upper bits */
|
||||
w = (__uint32_t)(v >> 32);
|
||||
w = (uint32_t)(v >> 32);
|
||||
if (w) {
|
||||
n = ffs(w);
|
||||
if (n)
|
||||
|
|
|
@ -3992,7 +3992,7 @@ xfs_bmapi_read(
|
|||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
|
||||
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
||||
XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
@ -4473,7 +4473,7 @@ xfs_bmapi_write(
|
|||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
|
||||
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
||||
XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
@ -4694,7 +4694,7 @@ xfs_bmapi_remap(
|
|||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
|
||||
XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
||||
XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
@ -5434,6 +5434,7 @@ __xfs_bunmapi(
|
|||
int whichfork; /* data or attribute fork */
|
||||
xfs_fsblock_t sum;
|
||||
xfs_filblks_t len = *rlen; /* length to unmap in file */
|
||||
xfs_fileoff_t max_len;
|
||||
|
||||
trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
|
||||
|
||||
|
@ -5455,6 +5456,16 @@ __xfs_bunmapi(
|
|||
ASSERT(len > 0);
|
||||
ASSERT(nexts >= 0);
|
||||
|
||||
/*
|
||||
* Guesstimate how many blocks we can unmap without running the risk of
|
||||
* blowing out the transaction with a mix of EFIs and reflink
|
||||
* adjustments.
|
||||
*/
|
||||
if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
|
||||
max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
|
||||
else
|
||||
max_len = len;
|
||||
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS) &&
|
||||
(error = xfs_iread_extents(tp, ip, whichfork)))
|
||||
return error;
|
||||
|
@ -5499,7 +5510,7 @@ __xfs_bunmapi(
|
|||
|
||||
extno = 0;
|
||||
while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
|
||||
(nexts == 0 || extno < nexts)) {
|
||||
(nexts == 0 || extno < nexts) && max_len > 0) {
|
||||
/*
|
||||
* Is the found extent after a hole in which bno lives?
|
||||
* Just back up to the previous extent, if so.
|
||||
|
@ -5531,6 +5542,15 @@ __xfs_bunmapi(
|
|||
}
|
||||
if (del.br_startoff + del.br_blockcount > bno + 1)
|
||||
del.br_blockcount = bno + 1 - del.br_startoff;
|
||||
|
||||
/* How much can we safely unmap? */
|
||||
if (max_len < del.br_blockcount) {
|
||||
del.br_startoff += del.br_blockcount - max_len;
|
||||
if (!wasdel)
|
||||
del.br_startblock += del.br_blockcount - max_len;
|
||||
del.br_blockcount = max_len;
|
||||
}
|
||||
|
||||
sum = del.br_startblock + del.br_blockcount;
|
||||
if (isrt &&
|
||||
(mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
|
||||
|
@ -5707,6 +5727,7 @@ __xfs_bunmapi(
|
|||
if (!isrt && wasdel)
|
||||
xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
|
||||
|
||||
max_len -= del.br_blockcount;
|
||||
bno = del.br_startoff - 1;
|
||||
nodelete:
|
||||
/*
|
||||
|
@ -6077,7 +6098,7 @@ xfs_bmap_shift_extents(
|
|||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
|
||||
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
||||
XFS_ERROR_REPORT("xfs_bmap_shift_extents",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
return -EFSCORRUPTED;
|
||||
|
@ -6229,7 +6250,7 @@ xfs_bmap_split_extent_at(
|
|||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
|
||||
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
||||
XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
return -EFSCORRUPTED;
|
||||
|
@ -6472,33 +6493,33 @@ xfs_bmap_finish_one(
|
|||
int whichfork,
|
||||
xfs_fileoff_t startoff,
|
||||
xfs_fsblock_t startblock,
|
||||
xfs_filblks_t blockcount,
|
||||
xfs_filblks_t *blockcount,
|
||||
xfs_exntst_t state)
|
||||
{
|
||||
int error = 0, done;
|
||||
xfs_fsblock_t firstfsb;
|
||||
int error = 0;
|
||||
|
||||
trace_xfs_bmap_deferred(tp->t_mountp,
|
||||
XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
|
||||
XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
|
||||
ip->i_ino, whichfork, startoff, blockcount, state);
|
||||
ip->i_ino, whichfork, startoff, *blockcount, state);
|
||||
|
||||
if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
|
||||
return -EFSCORRUPTED;
|
||||
|
||||
if (XFS_TEST_ERROR(false, tp->t_mountp,
|
||||
XFS_ERRTAG_BMAP_FINISH_ONE,
|
||||
XFS_RANDOM_BMAP_FINISH_ONE))
|
||||
XFS_ERRTAG_BMAP_FINISH_ONE))
|
||||
return -EIO;
|
||||
|
||||
switch (type) {
|
||||
case XFS_BMAP_MAP:
|
||||
error = xfs_bmapi_remap(tp, ip, startoff, blockcount,
|
||||
error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
|
||||
startblock, dfops);
|
||||
*blockcount = 0;
|
||||
break;
|
||||
case XFS_BMAP_UNMAP:
|
||||
error = xfs_bunmapi(tp, ip, startoff, blockcount,
|
||||
XFS_BMAPI_REMAP, 1, &startblock, dfops, &done);
|
||||
ASSERT(done);
|
||||
error = __xfs_bunmapi(tp, ip, startoff, blockcount,
|
||||
XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
|
|
|
@ -271,7 +271,7 @@ struct xfs_bmap_intent {
|
|||
int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_defer_ops *dfops,
|
||||
struct xfs_inode *ip, enum xfs_bmap_intent_type type,
|
||||
int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
|
||||
xfs_filblks_t blockcount, xfs_exntst_t state);
|
||||
xfs_filblks_t *blockcount, xfs_exntst_t state);
|
||||
int xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
|
||||
struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
|
||||
int xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
|
||||
|
|
|
@ -94,8 +94,8 @@ xfs_bmdr_to_bmbt(
|
|||
*/
|
||||
STATIC void
|
||||
__xfs_bmbt_get_all(
|
||||
__uint64_t l0,
|
||||
__uint64_t l1,
|
||||
uint64_t l0,
|
||||
uint64_t l1,
|
||||
xfs_bmbt_irec_t *s)
|
||||
{
|
||||
int ext_flag;
|
||||
|
@ -572,6 +572,16 @@ xfs_bmbt_init_key_from_rec(
|
|||
cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_bmbt_init_high_key_from_rec(
|
||||
union xfs_btree_key *key,
|
||||
union xfs_btree_rec *rec)
|
||||
{
|
||||
key->bmbt.br_startoff = cpu_to_be64(
|
||||
xfs_bmbt_disk_get_startoff(&rec->bmbt) +
|
||||
xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xfs_bmbt_init_rec_from_cur(
|
||||
struct xfs_btree_cur *cur,
|
||||
|
@ -588,15 +598,25 @@ xfs_bmbt_init_ptr_from_cur(
|
|||
ptr->l = 0;
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_bmbt_key_diff(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key)
|
||||
{
|
||||
return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
|
||||
return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
|
||||
cur->bc_rec.b.br_startoff;
|
||||
}
|
||||
|
||||
STATIC int64_t
|
||||
xfs_bmbt_diff_two_keys(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *k1,
|
||||
union xfs_btree_key *k2)
|
||||
{
|
||||
return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
|
||||
be64_to_cpu(k2->bmbt.br_startoff);
|
||||
}
|
||||
|
||||
static bool
|
||||
xfs_bmbt_verify(
|
||||
struct xfs_buf *bp)
|
||||
|
@ -687,7 +707,6 @@ const struct xfs_buf_ops xfs_bmbt_buf_ops = {
|
|||
};
|
||||
|
||||
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
STATIC int
|
||||
xfs_bmbt_keys_inorder(
|
||||
struct xfs_btree_cur *cur,
|
||||
|
@ -708,7 +727,6 @@ xfs_bmbt_recs_inorder(
|
|||
xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
|
||||
xfs_bmbt_disk_get_startoff(&r2->bmbt);
|
||||
}
|
||||
#endif /* DEBUG */
|
||||
|
||||
static const struct xfs_btree_ops xfs_bmbt_ops = {
|
||||
.rec_len = sizeof(xfs_bmbt_rec_t),
|
||||
|
@ -722,14 +740,14 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
|
|||
.get_minrecs = xfs_bmbt_get_minrecs,
|
||||
.get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
|
||||
.init_key_from_rec = xfs_bmbt_init_key_from_rec,
|
||||
.init_high_key_from_rec = xfs_bmbt_init_high_key_from_rec,
|
||||
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
|
||||
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
|
||||
.key_diff = xfs_bmbt_key_diff,
|
||||
.diff_two_keys = xfs_bmbt_diff_two_keys,
|
||||
.buf_ops = &xfs_bmbt_buf_ops,
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
.keys_inorder = xfs_bmbt_keys_inorder,
|
||||
.recs_inorder = xfs_bmbt_recs_inorder,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -43,7 +43,7 @@ kmem_zone_t *xfs_btree_cur_zone;
|
|||
/*
|
||||
* Btree magic numbers.
|
||||
*/
|
||||
static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
|
||||
static const uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
|
||||
{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
|
||||
XFS_FIBT_MAGIC, 0 },
|
||||
{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
|
||||
|
@ -51,12 +51,12 @@ static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
|
|||
XFS_REFC_CRC_MAGIC }
|
||||
};
|
||||
|
||||
__uint32_t
|
||||
uint32_t
|
||||
xfs_btree_magic(
|
||||
int crc,
|
||||
xfs_btnum_t btnum)
|
||||
{
|
||||
__uint32_t magic = xfs_magics[crc][btnum];
|
||||
uint32_t magic = xfs_magics[crc][btnum];
|
||||
|
||||
/* Ensure we asked for crc for crc-only magics. */
|
||||
ASSERT(magic != 0);
|
||||
|
@ -101,8 +101,7 @@ xfs_btree_check_lblock(
|
|||
be64_to_cpu(block->bb_u.l.bb_rightsib)));
|
||||
|
||||
if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
|
||||
XFS_ERRTAG_BTREE_CHECK_LBLOCK,
|
||||
XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
|
||||
XFS_ERRTAG_BTREE_CHECK_LBLOCK))) {
|
||||
if (bp)
|
||||
trace_xfs_btree_corrupt(bp, _RET_IP_);
|
||||
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
|
||||
|
@ -153,8 +152,7 @@ xfs_btree_check_sblock(
|
|||
block->bb_u.s.bb_rightsib;
|
||||
|
||||
if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
|
||||
XFS_ERRTAG_BTREE_CHECK_SBLOCK,
|
||||
XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
|
||||
XFS_ERRTAG_BTREE_CHECK_SBLOCK))) {
|
||||
if (bp)
|
||||
trace_xfs_btree_corrupt(bp, _RET_IP_);
|
||||
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
|
||||
|
@ -568,7 +566,7 @@ xfs_btree_ptr_offset(
|
|||
/*
|
||||
* Return a pointer to the n-th record in the btree block.
|
||||
*/
|
||||
STATIC union xfs_btree_rec *
|
||||
union xfs_btree_rec *
|
||||
xfs_btree_rec_addr(
|
||||
struct xfs_btree_cur *cur,
|
||||
int n,
|
||||
|
@ -581,7 +579,7 @@ xfs_btree_rec_addr(
|
|||
/*
|
||||
* Return a pointer to the n-th key in the btree block.
|
||||
*/
|
||||
STATIC union xfs_btree_key *
|
||||
union xfs_btree_key *
|
||||
xfs_btree_key_addr(
|
||||
struct xfs_btree_cur *cur,
|
||||
int n,
|
||||
|
@ -594,7 +592,7 @@ xfs_btree_key_addr(
|
|||
/*
|
||||
* Return a pointer to the n-th high key in the btree block.
|
||||
*/
|
||||
STATIC union xfs_btree_key *
|
||||
union xfs_btree_key *
|
||||
xfs_btree_high_key_addr(
|
||||
struct xfs_btree_cur *cur,
|
||||
int n,
|
||||
|
@ -607,7 +605,7 @@ xfs_btree_high_key_addr(
|
|||
/*
|
||||
* Return a pointer to the n-th block pointer in the btree block.
|
||||
*/
|
||||
STATIC union xfs_btree_ptr *
|
||||
union xfs_btree_ptr *
|
||||
xfs_btree_ptr_addr(
|
||||
struct xfs_btree_cur *cur,
|
||||
int n,
|
||||
|
@ -641,7 +639,7 @@ xfs_btree_get_iroot(
|
|||
* Retrieve the block pointer from the cursor at the given level.
|
||||
* This may be an inode btree root or from a buffer.
|
||||
*/
|
||||
STATIC struct xfs_btree_block * /* generic btree block pointer */
|
||||
struct xfs_btree_block * /* generic btree block pointer */
|
||||
xfs_btree_get_block(
|
||||
struct xfs_btree_cur *cur, /* btree cursor */
|
||||
int level, /* level in btree */
|
||||
|
@ -778,14 +776,14 @@ xfs_btree_lastrec(
|
|||
*/
|
||||
void
|
||||
xfs_btree_offsets(
|
||||
__int64_t fields, /* bitmask of fields */
|
||||
int64_t fields, /* bitmask of fields */
|
||||
const short *offsets, /* table of field offsets */
|
||||
int nbits, /* number of bits to inspect */
|
||||
int *first, /* output: first byte offset */
|
||||
int *last) /* output: last byte offset */
|
||||
{
|
||||
int i; /* current bit number */
|
||||
__int64_t imask; /* mask for current bit number */
|
||||
int64_t imask; /* mask for current bit number */
|
||||
|
||||
ASSERT(fields != 0);
|
||||
/*
|
||||
|
@ -1756,7 +1754,7 @@ xfs_btree_decrement(
|
|||
return error;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
int
|
||||
xfs_btree_lookup_get_block(
|
||||
struct xfs_btree_cur *cur, /* btree cursor */
|
||||
int level, /* level in the btree */
|
||||
|
@ -1846,7 +1844,7 @@ xfs_btree_lookup(
|
|||
int *stat) /* success/failure */
|
||||
{
|
||||
struct xfs_btree_block *block; /* current btree block */
|
||||
__int64_t diff; /* difference for the current key */
|
||||
int64_t diff; /* difference for the current key */
|
||||
int error; /* error return value */
|
||||
int keyno; /* current key number */
|
||||
int level; /* level in the btree */
|
||||
|
@ -4435,7 +4433,7 @@ xfs_btree_visit_blocks(
|
|||
* recovery completion writes the changes to disk.
|
||||
*/
|
||||
struct xfs_btree_block_change_owner_info {
|
||||
__uint64_t new_owner;
|
||||
uint64_t new_owner;
|
||||
struct list_head *buffer_list;
|
||||
};
|
||||
|
||||
|
@ -4481,7 +4479,7 @@ xfs_btree_block_change_owner(
|
|||
int
|
||||
xfs_btree_change_owner(
|
||||
struct xfs_btree_cur *cur,
|
||||
__uint64_t new_owner,
|
||||
uint64_t new_owner,
|
||||
struct list_head *buffer_list)
|
||||
{
|
||||
struct xfs_btree_block_change_owner_info bbcoi;
|
||||
|
@ -4585,7 +4583,7 @@ xfs_btree_simple_query_range(
|
|||
{
|
||||
union xfs_btree_rec *recp;
|
||||
union xfs_btree_key rec_key;
|
||||
__int64_t diff;
|
||||
int64_t diff;
|
||||
int stat;
|
||||
bool firstrec = true;
|
||||
int error;
|
||||
|
@ -4682,8 +4680,8 @@ xfs_btree_overlapped_query_range(
|
|||
union xfs_btree_key *hkp;
|
||||
union xfs_btree_rec *recp;
|
||||
struct xfs_btree_block *block;
|
||||
__int64_t ldiff;
|
||||
__int64_t hdiff;
|
||||
int64_t ldiff;
|
||||
int64_t hdiff;
|
||||
int level;
|
||||
struct xfs_buf *bp;
|
||||
int i;
|
||||
|
@ -4849,12 +4847,14 @@ xfs_btree_query_all(
|
|||
xfs_btree_query_range_fn fn,
|
||||
void *priv)
|
||||
{
|
||||
union xfs_btree_irec low_rec;
|
||||
union xfs_btree_irec high_rec;
|
||||
union xfs_btree_key low_key;
|
||||
union xfs_btree_key high_key;
|
||||
|
||||
memset(&low_rec, 0, sizeof(low_rec));
|
||||
memset(&high_rec, 0xFF, sizeof(high_rec));
|
||||
return xfs_btree_query_range(cur, &low_rec, &high_rec, fn, priv);
|
||||
memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
|
||||
memset(&low_key, 0, sizeof(low_key));
|
||||
memset(&high_key, 0xFF, sizeof(high_key));
|
||||
|
||||
return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -76,7 +76,7 @@ union xfs_btree_rec {
|
|||
#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
|
||||
#define XFS_BTNUM_REFC ((xfs_btnum_t)XFS_BTNUM_REFCi)
|
||||
|
||||
__uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
|
||||
uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
|
||||
|
||||
/*
|
||||
* For logging record fields.
|
||||
|
@ -150,20 +150,19 @@ struct xfs_btree_ops {
|
|||
union xfs_btree_rec *rec);
|
||||
|
||||
/* difference between key value and cursor value */
|
||||
__int64_t (*key_diff)(struct xfs_btree_cur *cur,
|
||||
int64_t (*key_diff)(struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key);
|
||||
|
||||
/*
|
||||
* Difference between key2 and key1 -- positive if key1 > key2,
|
||||
* negative if key1 < key2, and zero if equal.
|
||||
*/
|
||||
__int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
|
||||
int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key1,
|
||||
union xfs_btree_key *key2);
|
||||
|
||||
const struct xfs_buf_ops *buf_ops;
|
||||
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
/* check that k1 is lower than k2 */
|
||||
int (*keys_inorder)(struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *k1,
|
||||
|
@ -173,7 +172,6 @@ struct xfs_btree_ops {
|
|||
int (*recs_inorder)(struct xfs_btree_cur *cur,
|
||||
union xfs_btree_rec *r1,
|
||||
union xfs_btree_rec *r2);
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -213,11 +211,11 @@ typedef struct xfs_btree_cur
|
|||
union xfs_btree_irec bc_rec; /* current insert/search record value */
|
||||
struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */
|
||||
int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */
|
||||
__uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */
|
||||
uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */
|
||||
#define XFS_BTCUR_LEFTRA 1 /* left sibling has been read-ahead */
|
||||
#define XFS_BTCUR_RIGHTRA 2 /* right sibling has been read-ahead */
|
||||
__uint8_t bc_nlevels; /* number of levels in the tree */
|
||||
__uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
|
||||
uint8_t bc_nlevels; /* number of levels in the tree */
|
||||
uint8_t bc_blocklog; /* log2(blocksize) of btree blocks */
|
||||
xfs_btnum_t bc_btnum; /* identifies which btree type */
|
||||
int bc_statoff; /* offset of btre stats array */
|
||||
union {
|
||||
|
@ -330,7 +328,7 @@ xfs_btree_islastblock(
|
|||
*/
|
||||
void
|
||||
xfs_btree_offsets(
|
||||
__int64_t fields, /* bitmask of fields */
|
||||
int64_t fields, /* bitmask of fields */
|
||||
const short *offsets,/* table of field offsets */
|
||||
int nbits, /* number of bits to inspect */
|
||||
int *first, /* output: first byte offset */
|
||||
|
@ -408,7 +406,7 @@ int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
|
|||
int xfs_btree_insert(struct xfs_btree_cur *, int *);
|
||||
int xfs_btree_delete(struct xfs_btree_cur *, int *);
|
||||
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
|
||||
int xfs_btree_change_owner(struct xfs_btree_cur *cur, __uint64_t new_owner,
|
||||
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
|
||||
struct list_head *buffer_list);
|
||||
|
||||
/*
|
||||
|
@ -434,7 +432,7 @@ static inline int xfs_btree_get_numrecs(struct xfs_btree_block *block)
|
|||
}
|
||||
|
||||
static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block,
|
||||
__uint16_t numrecs)
|
||||
uint16_t numrecs)
|
||||
{
|
||||
block->bb_numrecs = cpu_to_be16(numrecs);
|
||||
}
|
||||
|
@ -506,4 +504,17 @@ int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
|
|||
|
||||
int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);
|
||||
|
||||
union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
|
||||
struct xfs_btree_block *block);
|
||||
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
|
||||
struct xfs_btree_block *block);
|
||||
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
|
||||
struct xfs_btree_block *block);
|
||||
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
|
||||
struct xfs_btree_block *block);
|
||||
int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
|
||||
union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
|
||||
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
|
||||
int level, struct xfs_buf **bpp);
|
||||
|
||||
#endif /* __XFS_BTREE_H__ */
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#ifndef _XFS_CKSUM_H
|
||||
#define _XFS_CKSUM_H 1
|
||||
|
||||
#define XFS_CRC_SEED (~(__uint32_t)0)
|
||||
#define XFS_CRC_SEED (~(uint32_t)0)
|
||||
|
||||
/*
|
||||
* Calculate the intermediate checksum for a buffer that has the CRC field
|
||||
|
@ -9,11 +9,11 @@
|
|||
* cksum_offset parameter. We do not modify the buffer during verification,
|
||||
* hence we have to split the CRC calculation across the cksum_offset.
|
||||
*/
|
||||
static inline __uint32_t
|
||||
static inline uint32_t
|
||||
xfs_start_cksum_safe(char *buffer, size_t length, unsigned long cksum_offset)
|
||||
{
|
||||
__uint32_t zero = 0;
|
||||
__uint32_t crc;
|
||||
uint32_t zero = 0;
|
||||
uint32_t crc;
|
||||
|
||||
/* Calculate CRC up to the checksum. */
|
||||
crc = crc32c(XFS_CRC_SEED, buffer, cksum_offset);
|
||||
|
@ -30,7 +30,7 @@ xfs_start_cksum_safe(char *buffer, size_t length, unsigned long cksum_offset)
|
|||
* Fast CRC method where the buffer is modified. Callers must have exclusive
|
||||
* access to the buffer while the calculation takes place.
|
||||
*/
|
||||
static inline __uint32_t
|
||||
static inline uint32_t
|
||||
xfs_start_cksum_update(char *buffer, size_t length, unsigned long cksum_offset)
|
||||
{
|
||||
/* zero the CRC field */
|
||||
|
@ -48,7 +48,7 @@ xfs_start_cksum_update(char *buffer, size_t length, unsigned long cksum_offset)
|
|||
* so that it is consistent on disk.
|
||||
*/
|
||||
static inline __le32
|
||||
xfs_end_cksum(__uint32_t crc)
|
||||
xfs_end_cksum(uint32_t crc)
|
||||
{
|
||||
return ~cpu_to_le32(crc);
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ xfs_end_cksum(__uint32_t crc)
|
|||
static inline void
|
||||
xfs_update_cksum(char *buffer, size_t length, unsigned long cksum_offset)
|
||||
{
|
||||
__uint32_t crc = xfs_start_cksum_update(buffer, length, cksum_offset);
|
||||
uint32_t crc = xfs_start_cksum_update(buffer, length, cksum_offset);
|
||||
|
||||
*(__le32 *)(buffer + cksum_offset) = xfs_end_cksum(crc);
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ xfs_update_cksum(char *buffer, size_t length, unsigned long cksum_offset)
|
|||
static inline int
|
||||
xfs_verify_cksum(char *buffer, size_t length, unsigned long cksum_offset)
|
||||
{
|
||||
__uint32_t crc = xfs_start_cksum_safe(buffer, length, cksum_offset);
|
||||
uint32_t crc = xfs_start_cksum_safe(buffer, length, cksum_offset);
|
||||
|
||||
return *(__le32 *)(buffer + cksum_offset) == xfs_end_cksum(crc);
|
||||
}
|
||||
|
|
|
@ -263,7 +263,7 @@ xfs_da3_node_read(
|
|||
|
||||
err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
|
||||
which_fork, &xfs_da3_node_buf_ops);
|
||||
if (!err && tp) {
|
||||
if (!err && tp && *bpp) {
|
||||
struct xfs_da_blkinfo *info = (*bpp)->b_addr;
|
||||
int type;
|
||||
|
||||
|
@ -1282,7 +1282,7 @@ xfs_da3_fixhashpath(
|
|||
return;
|
||||
break;
|
||||
case XFS_DIR2_LEAFN_MAGIC:
|
||||
lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
|
||||
lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
|
||||
if (count == 0)
|
||||
return;
|
||||
break;
|
||||
|
@ -1502,8 +1502,8 @@ xfs_da3_node_lookup_int(
|
|||
if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
|
||||
blk->magic == XFS_DIR3_LEAFN_MAGIC) {
|
||||
blk->magic = XFS_DIR2_LEAFN_MAGIC;
|
||||
blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
|
||||
blk->bp, NULL);
|
||||
blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
|
||||
blk->bp, NULL);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1929,8 +1929,8 @@ xfs_da3_path_shift(
|
|||
blk->magic = XFS_DIR2_LEAFN_MAGIC;
|
||||
ASSERT(level == path->active-1);
|
||||
blk->index = 0;
|
||||
blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
|
||||
blk->bp, NULL);
|
||||
blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
|
||||
blk->bp, NULL);
|
||||
break;
|
||||
default:
|
||||
ASSERT(0);
|
||||
|
@ -1952,7 +1952,7 @@ xfs_da3_path_shift(
|
|||
* This is implemented with some source-level loop unrolling.
|
||||
*/
|
||||
xfs_dahash_t
|
||||
xfs_da_hashname(const __uint8_t *name, int namelen)
|
||||
xfs_da_hashname(const uint8_t *name, int namelen)
|
||||
{
|
||||
xfs_dahash_t hash;
|
||||
|
||||
|
|
|
@ -60,10 +60,10 @@ enum xfs_dacmp {
|
|||
*/
|
||||
typedef struct xfs_da_args {
|
||||
struct xfs_da_geometry *geo; /* da block geometry */
|
||||
const __uint8_t *name; /* string (maybe not NULL terminated) */
|
||||
const uint8_t *name; /* string (maybe not NULL terminated) */
|
||||
int namelen; /* length of string (maybe no NULL) */
|
||||
__uint8_t filetype; /* filetype of inode for directories */
|
||||
__uint8_t *value; /* set of bytes (maybe contain NULLs) */
|
||||
uint8_t filetype; /* filetype of inode for directories */
|
||||
uint8_t *value; /* set of bytes (maybe contain NULLs) */
|
||||
int valuelen; /* length of value */
|
||||
int flags; /* argument flags (eg: ATTR_NOCREATE) */
|
||||
xfs_dahash_t hashval; /* hash value of name */
|
||||
|
@ -207,7 +207,7 @@ int xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
|
|||
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
|
||||
struct xfs_buf *dead_buf);
|
||||
|
||||
uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
|
||||
uint xfs_da_hashname(const uint8_t *name_string, int name_length);
|
||||
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
|
||||
const unsigned char *name, int len);
|
||||
|
||||
|
|
|
@ -49,7 +49,7 @@ xfs_dir3_sf_entsize(
|
|||
struct xfs_dir2_sf_hdr *hdr,
|
||||
int len)
|
||||
{
|
||||
return xfs_dir2_sf_entsize(hdr, len) + sizeof(__uint8_t);
|
||||
return xfs_dir2_sf_entsize(hdr, len) + sizeof(uint8_t);
|
||||
}
|
||||
|
||||
static struct xfs_dir2_sf_entry *
|
||||
|
@ -77,7 +77,7 @@ xfs_dir3_sf_nextentry(
|
|||
* not necessary. For non-filetype enable directories, the type is always
|
||||
* unknown and we never store the value.
|
||||
*/
|
||||
static __uint8_t
|
||||
static uint8_t
|
||||
xfs_dir2_sfe_get_ftype(
|
||||
struct xfs_dir2_sf_entry *sfep)
|
||||
{
|
||||
|
@ -87,16 +87,16 @@ xfs_dir2_sfe_get_ftype(
|
|||
static void
|
||||
xfs_dir2_sfe_put_ftype(
|
||||
struct xfs_dir2_sf_entry *sfep,
|
||||
__uint8_t ftype)
|
||||
uint8_t ftype)
|
||||
{
|
||||
ASSERT(ftype < XFS_DIR3_FT_MAX);
|
||||
}
|
||||
|
||||
static __uint8_t
|
||||
static uint8_t
|
||||
xfs_dir3_sfe_get_ftype(
|
||||
struct xfs_dir2_sf_entry *sfep)
|
||||
{
|
||||
__uint8_t ftype;
|
||||
uint8_t ftype;
|
||||
|
||||
ftype = sfep->name[sfep->namelen];
|
||||
if (ftype >= XFS_DIR3_FT_MAX)
|
||||
|
@ -107,7 +107,7 @@ xfs_dir3_sfe_get_ftype(
|
|||
static void
|
||||
xfs_dir3_sfe_put_ftype(
|
||||
struct xfs_dir2_sf_entry *sfep,
|
||||
__uint8_t ftype)
|
||||
uint8_t ftype)
|
||||
{
|
||||
ASSERT(ftype < XFS_DIR3_FT_MAX);
|
||||
|
||||
|
@ -124,7 +124,7 @@ xfs_dir3_sfe_put_ftype(
|
|||
static xfs_ino_t
|
||||
xfs_dir2_sf_get_ino(
|
||||
struct xfs_dir2_sf_hdr *hdr,
|
||||
__uint8_t *from)
|
||||
uint8_t *from)
|
||||
{
|
||||
if (hdr->i8count)
|
||||
return get_unaligned_be64(from) & 0x00ffffffffffffffULL;
|
||||
|
@ -135,7 +135,7 @@ xfs_dir2_sf_get_ino(
|
|||
static void
|
||||
xfs_dir2_sf_put_ino(
|
||||
struct xfs_dir2_sf_hdr *hdr,
|
||||
__uint8_t *to,
|
||||
uint8_t *to,
|
||||
xfs_ino_t ino)
|
||||
{
|
||||
ASSERT((ino & 0xff00000000000000ULL) == 0);
|
||||
|
@ -225,7 +225,7 @@ xfs_dir3_sfe_put_ino(
|
|||
|
||||
#define XFS_DIR3_DATA_ENTSIZE(n) \
|
||||
round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
|
||||
sizeof(xfs_dir2_data_off_t) + sizeof(__uint8_t)), \
|
||||
sizeof(xfs_dir2_data_off_t) + sizeof(uint8_t)), \
|
||||
XFS_DIR2_DATA_ALIGN)
|
||||
|
||||
static int
|
||||
|
@ -242,7 +242,7 @@ xfs_dir3_data_entsize(
|
|||
return XFS_DIR3_DATA_ENTSIZE(n);
|
||||
}
|
||||
|
||||
static __uint8_t
|
||||
static uint8_t
|
||||
xfs_dir2_data_get_ftype(
|
||||
struct xfs_dir2_data_entry *dep)
|
||||
{
|
||||
|
@ -252,16 +252,16 @@ xfs_dir2_data_get_ftype(
|
|||
static void
|
||||
xfs_dir2_data_put_ftype(
|
||||
struct xfs_dir2_data_entry *dep,
|
||||
__uint8_t ftype)
|
||||
uint8_t ftype)
|
||||
{
|
||||
ASSERT(ftype < XFS_DIR3_FT_MAX);
|
||||
}
|
||||
|
||||
static __uint8_t
|
||||
static uint8_t
|
||||
xfs_dir3_data_get_ftype(
|
||||
struct xfs_dir2_data_entry *dep)
|
||||
{
|
||||
__uint8_t ftype = dep->name[dep->namelen];
|
||||
uint8_t ftype = dep->name[dep->namelen];
|
||||
|
||||
if (ftype >= XFS_DIR3_FT_MAX)
|
||||
return XFS_DIR3_FT_UNKNOWN;
|
||||
|
@ -271,7 +271,7 @@ xfs_dir3_data_get_ftype(
|
|||
static void
|
||||
xfs_dir3_data_put_ftype(
|
||||
struct xfs_dir2_data_entry *dep,
|
||||
__uint8_t type)
|
||||
uint8_t type)
|
||||
{
|
||||
ASSERT(type < XFS_DIR3_FT_MAX);
|
||||
ASSERT(dep->namelen != 0);
|
||||
|
|
|
@ -111,11 +111,11 @@ struct xfs_da3_intnode {
|
|||
* appropriate.
|
||||
*/
|
||||
struct xfs_da3_icnode_hdr {
|
||||
__uint32_t forw;
|
||||
__uint32_t back;
|
||||
__uint16_t magic;
|
||||
__uint16_t count;
|
||||
__uint16_t level;
|
||||
uint32_t forw;
|
||||
uint32_t back;
|
||||
uint16_t magic;
|
||||
uint16_t count;
|
||||
uint16_t level;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -187,14 +187,14 @@ struct xfs_da3_icnode_hdr {
|
|||
/*
|
||||
* Byte offset in data block and shortform entry.
|
||||
*/
|
||||
typedef __uint16_t xfs_dir2_data_off_t;
|
||||
typedef uint16_t xfs_dir2_data_off_t;
|
||||
#define NULLDATAOFF 0xffffU
|
||||
typedef uint xfs_dir2_data_aoff_t; /* argument form */
|
||||
|
||||
/*
|
||||
* Offset in data space of a data entry.
|
||||
*/
|
||||
typedef __uint32_t xfs_dir2_dataptr_t;
|
||||
typedef uint32_t xfs_dir2_dataptr_t;
|
||||
#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff)
|
||||
#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0)
|
||||
|
||||
|
@ -206,7 +206,7 @@ typedef xfs_off_t xfs_dir2_off_t;
|
|||
/*
|
||||
* Directory block number (logical dirblk in file)
|
||||
*/
|
||||
typedef __uint32_t xfs_dir2_db_t;
|
||||
typedef uint32_t xfs_dir2_db_t;
|
||||
|
||||
#define XFS_INO32_SIZE 4
|
||||
#define XFS_INO64_SIZE 8
|
||||
|
@ -226,9 +226,9 @@ typedef __uint32_t xfs_dir2_db_t;
|
|||
* over them.
|
||||
*/
|
||||
typedef struct xfs_dir2_sf_hdr {
|
||||
__uint8_t count; /* count of entries */
|
||||
__uint8_t i8count; /* count of 8-byte inode #s */
|
||||
__uint8_t parent[8]; /* parent dir inode number */
|
||||
uint8_t count; /* count of entries */
|
||||
uint8_t i8count; /* count of 8-byte inode #s */
|
||||
uint8_t parent[8]; /* parent dir inode number */
|
||||
} __packed xfs_dir2_sf_hdr_t;
|
||||
|
||||
typedef struct xfs_dir2_sf_entry {
|
||||
|
@ -447,11 +447,11 @@ struct xfs_dir3_leaf_hdr {
|
|||
};
|
||||
|
||||
struct xfs_dir3_icleaf_hdr {
|
||||
__uint32_t forw;
|
||||
__uint32_t back;
|
||||
__uint16_t magic;
|
||||
__uint16_t count;
|
||||
__uint16_t stale;
|
||||
uint32_t forw;
|
||||
uint32_t back;
|
||||
uint16_t magic;
|
||||
uint16_t count;
|
||||
uint16_t stale;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -538,10 +538,10 @@ struct xfs_dir3_free {
|
|||
* xfs_dir3_free_hdr_from_disk/xfs_dir3_free_hdr_to_disk.
|
||||
*/
|
||||
struct xfs_dir3_icfree_hdr {
|
||||
__uint32_t magic;
|
||||
__uint32_t firstdb;
|
||||
__uint32_t nvalid;
|
||||
__uint32_t nused;
|
||||
uint32_t magic;
|
||||
uint32_t firstdb;
|
||||
uint32_t nvalid;
|
||||
uint32_t nused;
|
||||
|
||||
};
|
||||
|
||||
|
@ -632,10 +632,10 @@ typedef struct xfs_attr_shortform {
|
|||
__u8 padding;
|
||||
} hdr;
|
||||
struct xfs_attr_sf_entry {
|
||||
__uint8_t namelen; /* actual length of name (no NULL) */
|
||||
__uint8_t valuelen; /* actual length of value (no NULL) */
|
||||
__uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
|
||||
__uint8_t nameval[1]; /* name & value bytes concatenated */
|
||||
uint8_t namelen; /* actual length of name (no NULL) */
|
||||
uint8_t valuelen; /* actual length of value (no NULL) */
|
||||
uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
|
||||
uint8_t nameval[1]; /* name & value bytes concatenated */
|
||||
} list[1]; /* variable sized array */
|
||||
} xfs_attr_shortform_t;
|
||||
|
||||
|
@ -725,22 +725,22 @@ struct xfs_attr3_leafblock {
|
|||
* incore, neutral version of the attribute leaf header
|
||||
*/
|
||||
struct xfs_attr3_icleaf_hdr {
|
||||
__uint32_t forw;
|
||||
__uint32_t back;
|
||||
__uint16_t magic;
|
||||
__uint16_t count;
|
||||
__uint16_t usedbytes;
|
||||
uint32_t forw;
|
||||
uint32_t back;
|
||||
uint16_t magic;
|
||||
uint16_t count;
|
||||
uint16_t usedbytes;
|
||||
/*
|
||||
* firstused is 32-bit here instead of 16-bit like the on-disk variant
|
||||
* to support maximum fsb size of 64k without overflow issues throughout
|
||||
* the attr code. Instead, the overflow condition is handled on
|
||||
* conversion to/from disk.
|
||||
*/
|
||||
__uint32_t firstused;
|
||||
uint32_t firstused;
|
||||
__u8 holes;
|
||||
struct {
|
||||
__uint16_t base;
|
||||
__uint16_t size;
|
||||
uint16_t base;
|
||||
uint16_t size;
|
||||
} freemap[XFS_ATTR_LEAF_MAPSIZE];
|
||||
};
|
||||
|
||||
|
|
|
@ -218,8 +218,7 @@ xfs_dir_ino_validate(
|
|||
agblkno != 0 &&
|
||||
ioff < (1 << mp->m_sb.sb_inopblog) &&
|
||||
XFS_AGINO_TO_INO(mp, agno, agino) == ino;
|
||||
if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE,
|
||||
XFS_RANDOM_DIR_INO_VALIDATE))) {
|
||||
if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE))) {
|
||||
xfs_warn(mp, "Invalid inode number 0x%Lx",
|
||||
(unsigned long long) ino);
|
||||
XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
|
||||
|
|
|
@@ -47,9 +47,9 @@ struct xfs_dir_ops {
struct xfs_dir2_sf_entry *
(*sf_nextentry)(struct xfs_dir2_sf_hdr *hdr,
struct xfs_dir2_sf_entry *sfep);
__uint8_t (*sf_get_ftype)(struct xfs_dir2_sf_entry *sfep);
uint8_t (*sf_get_ftype)(struct xfs_dir2_sf_entry *sfep);
void (*sf_put_ftype)(struct xfs_dir2_sf_entry *sfep,
__uint8_t ftype);
uint8_t ftype);
xfs_ino_t (*sf_get_ino)(struct xfs_dir2_sf_hdr *hdr,
struct xfs_dir2_sf_entry *sfep);
void (*sf_put_ino)(struct xfs_dir2_sf_hdr *hdr,
@@ -60,9 +60,9 @@ struct xfs_dir_ops {
xfs_ino_t ino);

int (*data_entsize)(int len);
__uint8_t (*data_get_ftype)(struct xfs_dir2_data_entry *dep);
uint8_t (*data_get_ftype)(struct xfs_dir2_data_entry *dep);
void (*data_put_ftype)(struct xfs_dir2_data_entry *dep,
__uint8_t ftype);
uint8_t ftype);
__be16 * (*data_entry_tag_p)(struct xfs_dir2_data_entry *dep);
struct xfs_dir2_data_free *
(*data_bestfree_p)(struct xfs_dir2_data_hdr *hdr);
@@ -139,7 +139,7 @@ xfs_dir3_block_read(

err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, -1, bpp,
XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
if (!err && tp)
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
return err;
}
@@ -145,7 +145,7 @@ xfs_dir3_leaf_check_int(
static bool
xfs_dir3_leaf_verify(
struct xfs_buf *bp,
__uint16_t magic)
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_dir2_leaf *leaf = bp->b_addr;
@@ -154,7 +154,7 @@ xfs_dir3_leaf_verify(

if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
__uint16_t magic3;
uint16_t magic3;

magic3 = (magic == XFS_DIR2_LEAF1_MAGIC) ? XFS_DIR3_LEAF1_MAGIC
: XFS_DIR3_LEAFN_MAGIC;
@@ -178,7 +178,7 @@ xfs_dir3_leaf_verify(
static void
__read_verify(
struct xfs_buf *bp,
__uint16_t magic)
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;

@@ -195,7 +195,7 @@ __read_verify(
static void
__write_verify(
struct xfs_buf *bp,
__uint16_t magic)
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
@@ -256,7 +256,7 @@ const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
.verify_write = xfs_dir3_leafn_write_verify,
};

static int
int
xfs_dir3_leaf_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
@@ -268,7 +268,7 @@ xfs_dir3_leaf_read(

err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
XFS_DATA_FORK, &xfs_dir3_leaf1_buf_ops);
if (!err && tp)
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
return err;
}
@@ -285,7 +285,7 @@ xfs_dir3_leafn_read(

err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
XFS_DATA_FORK, &xfs_dir3_leafn_buf_ops);
if (!err && tp)
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
return err;
}
@@ -299,7 +299,7 @@ xfs_dir3_leaf_init(
struct xfs_trans *tp,
struct xfs_buf *bp,
xfs_ino_t owner,
__uint16_t type)
uint16_t type)
{
struct xfs_dir2_leaf *leaf = bp->b_addr;

@@ -343,7 +343,7 @@ xfs_dir3_leaf_get_buf(
xfs_da_args_t *args,
xfs_dir2_db_t bno,
struct xfs_buf **bpp,
__uint16_t magic)
uint16_t magic)
{
struct xfs_inode *dp = args->dp;
struct xfs_trans *tp = args->trans;
@@ -528,7 +528,7 @@ xfs_dir2_free_hdr_check(
* Stale entries are ok.
*/
xfs_dahash_t /* hash value */
xfs_dir2_leafn_lasthash(
xfs_dir2_leaf_lasthash(
struct xfs_inode *dp,
struct xfs_buf *bp, /* leaf buffer */
int *count) /* count of entries in leaf */
@@ -540,7 +540,9 @@ xfs_dir2_leafn_lasthash(
dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);

ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR2_LEAF1_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAF1_MAGIC);

if (count)
*count = leafhdr.count;
@@ -1405,8 +1407,8 @@ xfs_dir2_leafn_split(
/*
* Update last hashval in each block since we added the name.
*/
oldblk->hashval = xfs_dir2_leafn_lasthash(dp, oldblk->bp, NULL);
newblk->hashval = xfs_dir2_leafn_lasthash(dp, newblk->bp, NULL);
oldblk->hashval = xfs_dir2_leaf_lasthash(dp, oldblk->bp, NULL);
newblk->hashval = xfs_dir2_leaf_lasthash(dp, newblk->bp, NULL);
xfs_dir3_leaf_check(dp, oldblk->bp);
xfs_dir3_leaf_check(dp, newblk->bp);
return error;
@@ -58,6 +58,8 @@ extern int xfs_dir3_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
struct xfs_buf **bpp);

/* xfs_dir2_leaf.c */
extern int xfs_dir3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
extern int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
@@ -69,7 +71,7 @@ extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
struct xfs_dir2_leaf_entry *ents, int *indexp,
int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
struct xfs_buf **bpp, __uint16_t magic);
struct xfs_buf **bpp, uint16_t magic);
extern void xfs_dir3_leaf_log_ents(struct xfs_da_args *args,
struct xfs_buf *bp, int first, int last);
extern void xfs_dir3_leaf_log_header(struct xfs_da_args *args,
@@ -93,7 +95,7 @@ extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp, struct xfs_inode *dp,
/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_buf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_inode *dp,
extern xfs_dahash_t xfs_dir2_leaf_lasthash(struct xfs_inode *dp,
struct xfs_buf *bp, int *count);
extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
struct xfs_da_args *args, int *indexp,
@@ -128,7 +130,7 @@ extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
extern int xfs_dir2_sf_verify(struct xfs_inode *ip);

/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
size_t bufsize);
extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
struct dir_context *ctx, size_t bufsize);

#endif /* __XFS_DIR2_PRIV_H__ */
@@ -647,7 +647,7 @@ xfs_dir2_sf_verify(
int offset;
int size;
int error;
__uint8_t filetype;
uint8_t filetype;

ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
/*
@@ -103,8 +103,8 @@ struct xfs_ifork;
* Must be padded to 64 bit alignment.
*/
typedef struct xfs_sb {
__uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
__uint32_t sb_blocksize; /* logical block size, bytes */
uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
uint32_t sb_blocksize; /* logical block size, bytes */
xfs_rfsblock_t sb_dblocks; /* number of data blocks */
xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */
xfs_rtblock_t sb_rextents; /* number of realtime extents */
@@ -118,45 +118,45 @@ typedef struct xfs_sb {
xfs_agnumber_t sb_agcount; /* number of allocation groups */
xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */
xfs_extlen_t sb_logblocks; /* number of log blocks */
__uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */
__uint16_t sb_sectsize; /* volume sector size, bytes */
__uint16_t sb_inodesize; /* inode size, bytes */
__uint16_t sb_inopblock; /* inodes per block */
uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */
uint16_t sb_sectsize; /* volume sector size, bytes */
uint16_t sb_inodesize; /* inode size, bytes */
uint16_t sb_inopblock; /* inodes per block */
char sb_fname[12]; /* file system name */
__uint8_t sb_blocklog; /* log2 of sb_blocksize */
__uint8_t sb_sectlog; /* log2 of sb_sectsize */
__uint8_t sb_inodelog; /* log2 of sb_inodesize */
__uint8_t sb_inopblog; /* log2 of sb_inopblock */
__uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */
__uint8_t sb_rextslog; /* log2 of sb_rextents */
__uint8_t sb_inprogress; /* mkfs is in progress, don't mount */
__uint8_t sb_imax_pct; /* max % of fs for inode space */
uint8_t sb_blocklog; /* log2 of sb_blocksize */
uint8_t sb_sectlog; /* log2 of sb_sectsize */
uint8_t sb_inodelog; /* log2 of sb_inodesize */
uint8_t sb_inopblog; /* log2 of sb_inopblock */
uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */
uint8_t sb_rextslog; /* log2 of sb_rextents */
uint8_t sb_inprogress; /* mkfs is in progress, don't mount */
uint8_t sb_imax_pct; /* max % of fs for inode space */
/* statistics */
/*
* These fields must remain contiguous. If you really
* want to change their layout, make sure you fix the
* code in xfs_trans_apply_sb_deltas().
*/
__uint64_t sb_icount; /* allocated inodes */
__uint64_t sb_ifree; /* free inodes */
__uint64_t sb_fdblocks; /* free data blocks */
__uint64_t sb_frextents; /* free realtime extents */
uint64_t sb_icount; /* allocated inodes */
uint64_t sb_ifree; /* free inodes */
uint64_t sb_fdblocks; /* free data blocks */
uint64_t sb_frextents; /* free realtime extents */
/*
* End contiguous fields.
*/
xfs_ino_t sb_uquotino; /* user quota inode */
xfs_ino_t sb_gquotino; /* group quota inode */
__uint16_t sb_qflags; /* quota flags */
__uint8_t sb_flags; /* misc. flags */
__uint8_t sb_shared_vn; /* shared version number */
uint16_t sb_qflags; /* quota flags */
uint8_t sb_flags; /* misc. flags */
uint8_t sb_shared_vn; /* shared version number */
xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */
__uint32_t sb_unit; /* stripe or raid unit */
__uint32_t sb_width; /* stripe or raid width */
__uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */
__uint8_t sb_logsectlog; /* log2 of the log sector size */
__uint16_t sb_logsectsize; /* sector size for the log, bytes */
__uint32_t sb_logsunit; /* stripe unit size for the log */
__uint32_t sb_features2; /* additional feature bits */
uint32_t sb_unit; /* stripe or raid unit */
uint32_t sb_width; /* stripe or raid width */
uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */
uint8_t sb_logsectlog; /* log2 of the log sector size */
uint16_t sb_logsectsize; /* sector size for the log, bytes */
uint32_t sb_logsunit; /* stripe unit size for the log */
uint32_t sb_features2; /* additional feature bits */

/*
* bad features2 field as a result of failing to pad the sb structure to
@@ -167,17 +167,17 @@ typedef struct xfs_sb {
* the value in sb_features2 when formatting the incore superblock to
* the disk buffer.
*/
__uint32_t sb_bad_features2;
uint32_t sb_bad_features2;

/* version 5 superblock fields start here */

/* feature masks */
__uint32_t sb_features_compat;
__uint32_t sb_features_ro_compat;
__uint32_t sb_features_incompat;
__uint32_t sb_features_log_incompat;
uint32_t sb_features_compat;
uint32_t sb_features_ro_compat;
uint32_t sb_features_incompat;
uint32_t sb_features_log_incompat;

__uint32_t sb_crc; /* superblock crc */
uint32_t sb_crc; /* superblock crc */
xfs_extlen_t sb_spino_align; /* sparse inode chunk alignment */

xfs_ino_t sb_pquotino; /* project quota inode */
@@ -449,7 +449,7 @@ static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
static inline bool
xfs_sb_has_compat_feature(
struct xfs_sb *sbp,
__uint32_t feature)
uint32_t feature)
{
return (sbp->sb_features_compat & feature) != 0;
}
@@ -465,7 +465,7 @@ xfs_sb_has_compat_feature(
static inline bool
xfs_sb_has_ro_compat_feature(
struct xfs_sb *sbp,
__uint32_t feature)
uint32_t feature)
{
return (sbp->sb_features_ro_compat & feature) != 0;
}
@@ -482,7 +482,7 @@ xfs_sb_has_ro_compat_feature(
static inline bool
xfs_sb_has_incompat_feature(
struct xfs_sb *sbp,
__uint32_t feature)
uint32_t feature)
{
return (sbp->sb_features_incompat & feature) != 0;
}
@@ -492,7 +492,7 @@ xfs_sb_has_incompat_feature(
static inline bool
xfs_sb_has_incompat_log_feature(
struct xfs_sb *sbp,
__uint32_t feature)
uint32_t feature)
{
return (sbp->sb_features_log_incompat & feature) != 0;
}
@@ -594,8 +594,8 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
*/
#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
#define XFS_B_TO_FSB(mp,b) \
((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
((((uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
#define XFS_B_TO_FSBT(mp,b) (((uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)

/*
@@ -1072,7 +1072,7 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
* next agno_log bits - ag number
* high agno_log-agblklog-inopblog bits - 0
*/
#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
#define XFS_INO_MASK(k) (uint32_t)((1ULL << (k)) - 1)
#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
@@ -1211,6 +1211,7 @@ struct xfs_dsymlink_hdr {

#define XFS_SYMLINK_CRC_OFF offsetof(struct xfs_dsymlink_hdr, sl_crc)

#define XFS_SYMLINK_MAXLEN 1024
/*
* The maximum pathlen is 1024 bytes. Since the minimum file system
* blocksize is 512 bytes, we can get a max of 3 extents back from
@@ -1269,16 +1270,16 @@ typedef __be32 xfs_alloc_ptr_t;
#define XFS_FIBT_MAGIC 0x46494254 /* 'FIBT' */
#define XFS_FIBT_CRC_MAGIC 0x46494233 /* 'FIB3' */

typedef __uint64_t xfs_inofree_t;
typedef uint64_t xfs_inofree_t;
#define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t))
#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3)
#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
#define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))

#define XFS_INOBT_HOLEMASK_FULL 0 /* holemask for full chunk */
#define XFS_INOBT_HOLEMASK_BITS (NBBY * sizeof(__uint16_t))
#define XFS_INOBT_HOLEMASK_BITS (NBBY * sizeof(uint16_t))
#define XFS_INODES_PER_HOLEMASK_BIT \
(XFS_INODES_PER_CHUNK / (NBBY * sizeof(__uint16_t)))
(XFS_INODES_PER_CHUNK / (NBBY * sizeof(uint16_t)))

static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
{
@@ -1312,9 +1313,9 @@ typedef struct xfs_inobt_rec {

typedef struct xfs_inobt_rec_incore {
xfs_agino_t ir_startino; /* starting inode number */
__uint16_t ir_holemask; /* hole mask for sparse chunks */
__uint8_t ir_count; /* total inode count */
__uint8_t ir_freecount; /* count of free inodes (set bits) */
uint16_t ir_holemask; /* hole mask for sparse chunks */
uint8_t ir_count; /* total inode count */
uint8_t ir_freecount; /* count of free inodes (set bits) */
xfs_inofree_t ir_free; /* free inode mask */
} xfs_inobt_rec_incore_t;

@@ -1397,15 +1398,15 @@ struct xfs_rmap_rec {
* rm_offset:54-60 aren't used and should be zero
* rm_offset:0-53 is the block offset within the inode
*/
#define XFS_RMAP_OFF_ATTR_FORK ((__uint64_t)1ULL << 63)
#define XFS_RMAP_OFF_BMBT_BLOCK ((__uint64_t)1ULL << 62)
#define XFS_RMAP_OFF_UNWRITTEN ((__uint64_t)1ULL << 61)
#define XFS_RMAP_OFF_ATTR_FORK ((uint64_t)1ULL << 63)
#define XFS_RMAP_OFF_BMBT_BLOCK ((uint64_t)1ULL << 62)
#define XFS_RMAP_OFF_UNWRITTEN ((uint64_t)1ULL << 61)

#define XFS_RMAP_LEN_MAX ((__uint32_t)~0U)
#define XFS_RMAP_LEN_MAX ((uint32_t)~0U)
#define XFS_RMAP_OFF_FLAGS (XFS_RMAP_OFF_ATTR_FORK | \
XFS_RMAP_OFF_BMBT_BLOCK | \
XFS_RMAP_OFF_UNWRITTEN)
#define XFS_RMAP_OFF_MASK ((__uint64_t)0x3FFFFFFFFFFFFFULL)
#define XFS_RMAP_OFF_MASK ((uint64_t)0x3FFFFFFFFFFFFFULL)

#define XFS_RMAP_OFF(off) ((off) & XFS_RMAP_OFF_MASK)

@@ -1431,8 +1432,8 @@ struct xfs_rmap_rec {
struct xfs_rmap_irec {
xfs_agblock_t rm_startblock; /* extent start block */
xfs_extlen_t rm_blockcount; /* extent length */
__uint64_t rm_owner; /* extent owner */
__uint64_t rm_offset; /* offset within the owner */
uint64_t rm_owner; /* extent owner */
uint64_t rm_offset; /* offset within the owner */
unsigned int rm_flags; /* state flags */
};

@@ -1544,11 +1545,11 @@ typedef struct xfs_bmbt_rec {
__be64 l0, l1;
} xfs_bmbt_rec_t;

typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
typedef uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;

typedef struct xfs_bmbt_rec_host {
__uint64_t l0, l1;
uint64_t l0, l1;
} xfs_bmbt_rec_host_t;

/*
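The rmap definitions above pack per-extent state into the top bits of rm_offset (attr fork, bmbt block, unwritten) and keep the logical offset in the low 54 bits. A standalone sketch of that encoding, re-declaring the masks locally instead of including the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define RMAP_OFF_ATTR_FORK	((uint64_t)1ULL << 63)
#define RMAP_OFF_UNWRITTEN	((uint64_t)1ULL << 61)
#define RMAP_OFF_MASK		((uint64_t)0x3FFFFFFFFFFFFFULL)

int main(void)
{
	/* an unwritten attr-fork mapping at logical block offset 100 */
	uint64_t rm_offset = RMAP_OFF_ATTR_FORK | RMAP_OFF_UNWRITTEN | 100;

	printf("offset=%llu attr_fork=%d unwritten=%d\n",
	       (unsigned long long)(rm_offset & RMAP_OFF_MASK),
	       (rm_offset & RMAP_OFF_ATTR_FORK) != 0,
	       (rm_offset & RMAP_OFF_UNWRITTEN) != 0);
	return 0;
}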
@@ -302,10 +302,10 @@ typedef struct xfs_bstat {
* and using two 16bit values to hold new 32bit projid was choosen
* to retain compatibility with "old" filesystems).
*/
static inline __uint32_t
static inline uint32_t
bstat_get_projid(struct xfs_bstat *bs)
{
return (__uint32_t)bs->bs_projid_hi << 16 | bs->bs_projid_lo;
return (uint32_t)bs->bs_projid_hi << 16 | bs->bs_projid_lo;
}

/*
@@ -446,19 +446,15 @@ typedef struct xfs_handle {
} xfs_handle_t;
#define ha_fsid ha_u._ha_fsid

#define XFS_HSIZE(handle) (((char *) &(handle).ha_fid.fid_pad \
- (char *) &(handle)) \
+ (handle).ha_fid.fid_len)

/*
* Structure passed to XFS_IOC_SWAPEXT
*/
typedef struct xfs_swapext
{
__int64_t sx_version; /* version */
int64_t sx_version; /* version */
#define XFS_SX_VERSION 0
__int64_t sx_fdtarget; /* fd of target file */
__int64_t sx_fdtmp; /* fd of tmp file */
int64_t sx_fdtarget; /* fd of target file */
int64_t sx_fdtmp; /* fd of tmp file */
xfs_off_t sx_offset; /* offset into file */
xfs_off_t sx_length; /* leng from offset */
char sx_pad[16]; /* pad space, unused */
@@ -546,7 +542,7 @@ typedef struct xfs_swapext
#define XFS_IOC_ATTRLIST_BY_HANDLE _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
#define XFS_IOC_ATTRMULTI_BY_HANDLE _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq)
#define XFS_IOC_FSGEOMETRY _IOR ('X', 124, struct xfs_fsop_geom)
#define XFS_IOC_GOINGDOWN _IOR ('X', 125, __uint32_t)
#define XFS_IOC_GOINGDOWN _IOR ('X', 125, uint32_t)
/* XFS_IOC_GETFSUUID ---------- deprecated 140 */
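bstat_get_projid() above reassembles a 32-bit project ID from the two 16-bit fields kept for compatibility with old filesystems; for illustration, the reverse split looks like this (the helper name is invented, it is not part of the XFS API):

#include <stdint.h>
#include <stdio.h>

/* illustrative inverse of bstat_get_projid(): split a 32-bit projid
 * into the hi/lo halves the way bs_projid_hi/bs_projid_lo store it */
static void split_projid(uint32_t projid, uint16_t *hi, uint16_t *lo)
{
	*hi = (uint16_t)(projid >> 16);
	*lo = (uint16_t)(projid & 0xffff);
}

int main(void)
{
	uint16_t hi, lo;

	split_projid(0x00012345, &hi, &lo);
	printf("%#x %#x\n", hi, lo);	/* 0x1 0x2345 */
	return 0;
}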
@@ -46,7 +46,7 @@
/*
* Allocation group level functions.
*/
static inline int
int
xfs_ialloc_cluster_alignment(
struct xfs_mount *mp)
{
@@ -98,24 +98,15 @@ xfs_inobt_update(
return xfs_btree_update(cur, &rec);
}

/*
* Get the data from the pointed-to record.
*/
int /* error */
xfs_inobt_get_rec(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_inobt_rec_incore_t *irec, /* btree record */
int *stat) /* output: success/failure */
/* Convert on-disk btree record to incore inobt record. */
void
xfs_inobt_btrec_to_irec(
struct xfs_mount *mp,
union xfs_btree_rec *rec,
struct xfs_inobt_rec_incore *irec)
{
union xfs_btree_rec *rec;
int error;

error = xfs_btree_get_rec(cur, &rec, stat);
if (error || *stat == 0)
return error;

irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
irec->ir_count = rec->inobt.ir_u.sp.ir_count;
irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
@@ -130,6 +121,25 @@ xfs_inobt_get_rec(
be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
}
irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
}

/*
* Get the data from the pointed-to record.
*/
int
xfs_inobt_get_rec(
struct xfs_btree_cur *cur,
struct xfs_inobt_rec_incore *irec,
int *stat)
{
union xfs_btree_rec *rec;
int error;

error = xfs_btree_get_rec(cur, &rec, stat);
if (error || *stat == 0)
return error;

xfs_inobt_btrec_to_irec(cur->bc_mp, rec, irec);

return 0;
}
@@ -140,9 +150,9 @@ xfs_inobt_get_rec(
STATIC int
xfs_inobt_insert_rec(
struct xfs_btree_cur *cur,
__uint16_t holemask,
__uint8_t count,
__int32_t freecount,
uint16_t holemask,
uint8_t count,
int32_t freecount,
xfs_inofree_t free,
int *stat)
{
@@ -2542,8 +2552,7 @@ xfs_agi_read_verify(
!xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
xfs_buf_ioerror(bp, -EFSBADCRC);
else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp,
XFS_ERRTAG_IALLOC_READ_AGI,
XFS_RANDOM_IALLOC_READ_AGI))
XFS_ERRTAG_IALLOC_READ_AGI))
xfs_buf_ioerror(bp, -EFSCORRUPTED);

if (bp->b_error)
@@ -168,5 +168,10 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, struct xfs_buf **bpp);

union xfs_btree_rec;
void xfs_inobt_btrec_to_irec(struct xfs_mount *mp, union xfs_btree_rec *rec,
struct xfs_inobt_rec_incore *irec);

int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);

#endif /* __XFS_IALLOC_H__ */
@@ -174,6 +174,18 @@ xfs_inobt_init_key_from_rec(
key->inobt.ir_startino = rec->inobt.ir_startino;
}

STATIC void
xfs_inobt_init_high_key_from_rec(
union xfs_btree_key *key,
union xfs_btree_rec *rec)
{
__u32 x;

x = be32_to_cpu(rec->inobt.ir_startino);
x += XFS_INODES_PER_CHUNK - 1;
key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
struct xfs_btree_cur *cur,
@@ -219,15 +231,25 @@ xfs_finobt_init_ptr_from_cur(
ptr->s = agi->agi_free_root;
}

STATIC __int64_t
STATIC int64_t
xfs_inobt_key_diff(
struct xfs_btree_cur *cur,
union xfs_btree_key *key)
{
return (__int64_t)be32_to_cpu(key->inobt.ir_startino) -
return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
struct xfs_btree_cur *cur,
union xfs_btree_key *k1,
union xfs_btree_key *k2)
{
return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
be32_to_cpu(k2->inobt.ir_startino);
}

static int
xfs_inobt_verify(
struct xfs_buf *bp)
@@ -302,7 +324,6 @@ const struct xfs_buf_ops xfs_inobt_buf_ops = {
.verify_write = xfs_inobt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_inobt_keys_inorder(
struct xfs_btree_cur *cur,
@@ -322,7 +343,6 @@ xfs_inobt_recs_inorder(
return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
be32_to_cpu(r2->inobt.ir_startino);
}
#endif /* DEBUG */

static const struct xfs_btree_ops xfs_inobt_ops = {
.rec_len = sizeof(xfs_inobt_rec_t),
@@ -335,14 +355,14 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
.buf_ops = &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
.diff_two_keys = xfs_inobt_diff_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
#endif
};

static const struct xfs_btree_ops xfs_finobt_ops = {
@@ -356,14 +376,14 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
.init_high_key_from_rec = xfs_inobt_init_high_key_from_rec,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
.buf_ops = &xfs_inobt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
.diff_two_keys = xfs_inobt_diff_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
#endif
};

/*
@@ -105,8 +105,7 @@ xfs_inode_buf_verify(
di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
xfs_dinode_good_version(mp, dip->di_version);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
XFS_ERRTAG_ITOBP_INOTOBP))) {
if (readahead) {
bp->b_flags &= ~XBF_DONE;
xfs_buf_ioerror(bp, -EIO);
@@ -381,7 +380,7 @@ xfs_log_dinode_to_disk(
}
}

static bool
bool
xfs_dinode_verify(
struct xfs_mount *mp,
xfs_ino_t ino,
@@ -444,7 +443,7 @@ xfs_dinode_calc_crc(
struct xfs_mount *mp,
struct xfs_dinode *dip)
{
__uint32_t crc;
uint32_t crc;

if (dip->di_version < 3)
return;
@@ -28,26 +28,26 @@ struct xfs_dinode;
* format specific structures at the appropriate time.
*/
struct xfs_icdinode {
__int8_t di_version; /* inode version */
__int8_t di_format; /* format of di_c data */
__uint16_t di_flushiter; /* incremented on flush */
__uint32_t di_uid; /* owner's user id */
__uint32_t di_gid; /* owner's group id */
__uint16_t di_projid_lo; /* lower part of owner's project id */
__uint16_t di_projid_hi; /* higher part of owner's project id */
int8_t di_version; /* inode version */
int8_t di_format; /* format of di_c data */
uint16_t di_flushiter; /* incremented on flush */
uint32_t di_uid; /* owner's user id */
uint32_t di_gid; /* owner's group id */
uint16_t di_projid_lo; /* lower part of owner's project id */
uint16_t di_projid_hi; /* higher part of owner's project id */
xfs_fsize_t di_size; /* number of bytes in file */
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
xfs_extnum_t di_nextents; /* number of extents in data fork */
xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/
__uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */
__int8_t di_aformat; /* format of attr fork's data */
__uint32_t di_dmevmask; /* DMIG event mask */
__uint16_t di_dmstate; /* DMIG state info */
__uint16_t di_flags; /* random flags, XFS_DIFLAG_... */
uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */
int8_t di_aformat; /* format of attr fork's data */
uint32_t di_dmevmask; /* DMIG event mask */
uint16_t di_dmstate; /* DMIG state info */
uint16_t di_flags; /* random flags, XFS_DIFLAG_... */

__uint64_t di_flags2; /* more random flags */
__uint32_t di_cowextsize; /* basic cow extent size for file */
uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */

xfs_ictimestamp_t di_crtime; /* time created */
};
@@ -82,4 +82,7 @@ void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#define xfs_inobp_check(mp, bp)
#endif /* DEBUG */

bool xfs_dinode_verify(struct xfs_mount *mp, xfs_ino_t ino,
struct xfs_dinode *dip);

#endif /* __XFS_INODE_BUF_H__ */
@@ -31,7 +31,7 @@ struct xfs_trans_res;
* through all the log items definitions and everything they encode into the
* log.
*/
typedef __uint32_t xlog_tid_t;
typedef uint32_t xlog_tid_t;

#define XLOG_MIN_ICLOGS 2
#define XLOG_MAX_ICLOGS 8
@@ -211,7 +211,7 @@ typedef struct xfs_log_iovec {
typedef struct xfs_trans_header {
uint th_magic; /* magic number */
uint th_type; /* transaction type */
__int32_t th_tid; /* transaction id (unused) */
int32_t th_tid; /* transaction id (unused) */
uint th_num_items; /* num items logged by trans */
} xfs_trans_header_t;

@@ -265,52 +265,52 @@ typedef struct xfs_trans_header {
* must be added on to the end.
*/
typedef struct xfs_inode_log_format {
__uint16_t ilf_type; /* inode log item type */
__uint16_t ilf_size; /* size of this item */
__uint32_t ilf_fields; /* flags for fields logged */
__uint16_t ilf_asize; /* size of attr d/ext/root */
__uint16_t ilf_dsize; /* size of data/ext/root */
__uint64_t ilf_ino; /* inode number */
uint16_t ilf_type; /* inode log item type */
uint16_t ilf_size; /* size of this item */
uint32_t ilf_fields; /* flags for fields logged */
uint16_t ilf_asize; /* size of attr d/ext/root */
uint16_t ilf_dsize; /* size of data/ext/root */
uint64_t ilf_ino; /* inode number */
union {
__uint32_t ilfu_rdev; /* rdev value for dev inode*/
uint32_t ilfu_rdev; /* rdev value for dev inode*/
uuid_t ilfu_uuid; /* mount point value */
} ilf_u;
__int64_t ilf_blkno; /* blkno of inode buffer */
__int32_t ilf_len; /* len of inode buffer */
__int32_t ilf_boffset; /* off of inode in buffer */
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
int32_t ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_t;

typedef struct xfs_inode_log_format_32 {
__uint16_t ilf_type; /* inode log item type */
__uint16_t ilf_size; /* size of this item */
__uint32_t ilf_fields; /* flags for fields logged */
__uint16_t ilf_asize; /* size of attr d/ext/root */
__uint16_t ilf_dsize; /* size of data/ext/root */
__uint64_t ilf_ino; /* inode number */
uint16_t ilf_type; /* inode log item type */
uint16_t ilf_size; /* size of this item */
uint32_t ilf_fields; /* flags for fields logged */
uint16_t ilf_asize; /* size of attr d/ext/root */
uint16_t ilf_dsize; /* size of data/ext/root */
uint64_t ilf_ino; /* inode number */
union {
__uint32_t ilfu_rdev; /* rdev value for dev inode*/
uint32_t ilfu_rdev; /* rdev value for dev inode*/
uuid_t ilfu_uuid; /* mount point value */
} ilf_u;
__int64_t ilf_blkno; /* blkno of inode buffer */
__int32_t ilf_len; /* len of inode buffer */
__int32_t ilf_boffset; /* off of inode in buffer */
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
int32_t ilf_boffset; /* off of inode in buffer */
} __attribute__((packed)) xfs_inode_log_format_32_t;

typedef struct xfs_inode_log_format_64 {
__uint16_t ilf_type; /* inode log item type */
__uint16_t ilf_size; /* size of this item */
__uint32_t ilf_fields; /* flags for fields logged */
__uint16_t ilf_asize; /* size of attr d/ext/root */
__uint16_t ilf_dsize; /* size of data/ext/root */
__uint32_t ilf_pad; /* pad for 64 bit boundary */
__uint64_t ilf_ino; /* inode number */
uint16_t ilf_type; /* inode log item type */
uint16_t ilf_size; /* size of this item */
uint32_t ilf_fields; /* flags for fields logged */
uint16_t ilf_asize; /* size of attr d/ext/root */
uint16_t ilf_dsize; /* size of data/ext/root */
uint32_t ilf_pad; /* pad for 64 bit boundary */
uint64_t ilf_ino; /* inode number */
union {
__uint32_t ilfu_rdev; /* rdev value for dev inode*/
uint32_t ilfu_rdev; /* rdev value for dev inode*/
uuid_t ilfu_uuid; /* mount point value */
} ilf_u;
__int64_t ilf_blkno; /* blkno of inode buffer */
__int32_t ilf_len; /* len of inode buffer */
__int32_t ilf_boffset; /* off of inode in buffer */
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
int32_t ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_64_t;

@@ -379,8 +379,8 @@ static inline int xfs_ilog_fdata(int w)
* information.
*/
typedef struct xfs_ictimestamp {
__int32_t t_sec; /* timestamp seconds */
__int32_t t_nsec; /* timestamp nanoseconds */
int32_t t_sec; /* timestamp seconds */
int32_t t_nsec; /* timestamp nanoseconds */
} xfs_ictimestamp_t;

/*
@@ -388,18 +388,18 @@ typedef struct xfs_ictimestamp {
* kept identical to struct xfs_dinode except for the endianness annotations.
*/
struct xfs_log_dinode {
__uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
__uint16_t di_mode; /* mode and type of file */
__int8_t di_version; /* inode version */
__int8_t di_format; /* format of di_c data */
__uint8_t di_pad3[2]; /* unused in v2/3 inodes */
__uint32_t di_uid; /* owner's user id */
__uint32_t di_gid; /* owner's group id */
__uint32_t di_nlink; /* number of links to file */
__uint16_t di_projid_lo; /* lower part of owner's project id */
__uint16_t di_projid_hi; /* higher part of owner's project id */
__uint8_t di_pad[6]; /* unused, zeroed space */
__uint16_t di_flushiter; /* incremented on flush */
uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
uint16_t di_mode; /* mode and type of file */
int8_t di_version; /* inode version */
int8_t di_format; /* format of di_c data */
uint8_t di_pad3[2]; /* unused in v2/3 inodes */
uint32_t di_uid; /* owner's user id */
uint32_t di_gid; /* owner's group id */
uint32_t di_nlink; /* number of links to file */
uint16_t di_projid_lo; /* lower part of owner's project id */
uint16_t di_projid_hi; /* higher part of owner's project id */
uint8_t di_pad[6]; /* unused, zeroed space */
uint16_t di_flushiter; /* incremented on flush */
xfs_ictimestamp_t di_atime; /* time last accessed */
xfs_ictimestamp_t di_mtime; /* time last modified */
xfs_ictimestamp_t di_ctime; /* time created/inode modified */
@@ -408,23 +408,23 @@ struct xfs_log_dinode {
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
xfs_extnum_t di_nextents; /* number of extents in data fork */
xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/
__uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */
__int8_t di_aformat; /* format of attr fork's data */
__uint32_t di_dmevmask; /* DMIG event mask */
__uint16_t di_dmstate; /* DMIG state info */
__uint16_t di_flags; /* random flags, XFS_DIFLAG_... */
__uint32_t di_gen; /* generation number */
uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */
int8_t di_aformat; /* format of attr fork's data */
uint32_t di_dmevmask; /* DMIG event mask */
uint16_t di_dmstate; /* DMIG state info */
uint16_t di_flags; /* random flags, XFS_DIFLAG_... */
uint32_t di_gen; /* generation number */

/* di_next_unlinked is the only non-core field in the old dinode */
xfs_agino_t di_next_unlinked;/* agi unlinked list ptr */

/* start of the extended dinode, writable fields */
__uint32_t di_crc; /* CRC of the inode */
__uint64_t di_changecount; /* number of attribute changes */
uint32_t di_crc; /* CRC of the inode */
uint64_t di_changecount; /* number of attribute changes */
xfs_lsn_t di_lsn; /* flush sequence */
__uint64_t di_flags2; /* more random flags */
__uint32_t di_cowextsize; /* basic cow extent size for file */
__uint8_t di_pad2[12]; /* more padding for future expansion */
uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */
uint8_t di_pad2[12]; /* more padding for future expansion */

/* fields only written to during inode creation */
xfs_ictimestamp_t di_crtime; /* time created */
@@ -483,7 +483,7 @@ typedef struct xfs_buf_log_format {
unsigned short blf_size; /* size of this item */
unsigned short blf_flags; /* misc state */
unsigned short blf_len; /* number of blocks in this buf */
__int64_t blf_blkno; /* starting blkno of this buf */
int64_t blf_blkno; /* starting blkno of this buf */
unsigned int blf_map_size; /* used size of data bitmap in words */
unsigned int blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
} xfs_buf_log_format_t;
@@ -533,7 +533,7 @@ xfs_blft_to_flags(struct xfs_buf_log_format *blf, enum xfs_blft type)
blf->blf_flags |= ((type << XFS_BLFT_SHIFT) & XFS_BLFT_MASK);
}

static inline __uint16_t
static inline uint16_t
xfs_blft_from_flags(struct xfs_buf_log_format *blf)
{
return (blf->blf_flags & XFS_BLFT_MASK) >> XFS_BLFT_SHIFT;
@@ -554,14 +554,14 @@ typedef struct xfs_extent {
* conversion routine.
*/
typedef struct xfs_extent_32 {
__uint64_t ext_start;
__uint32_t ext_len;
uint64_t ext_start;
uint32_t ext_len;
} __attribute__((packed)) xfs_extent_32_t;

typedef struct xfs_extent_64 {
__uint64_t ext_start;
__uint32_t ext_len;
__uint32_t ext_pad;
uint64_t ext_start;
uint32_t ext_len;
uint32_t ext_pad;
} xfs_extent_64_t;

/*
@@ -570,26 +570,26 @@ typedef struct xfs_extent_64 {
* size is given by efi_nextents.
*/
typedef struct xfs_efi_log_format {
__uint16_t efi_type; /* efi log item type */
__uint16_t efi_size; /* size of this item */
__uint32_t efi_nextents; /* # extents to free */
__uint64_t efi_id; /* efi identifier */
uint16_t efi_type; /* efi log item type */
uint16_t efi_size; /* size of this item */
uint32_t efi_nextents; /* # extents to free */
uint64_t efi_id; /* efi identifier */
xfs_extent_t efi_extents[1]; /* array of extents to free */
} xfs_efi_log_format_t;

typedef struct xfs_efi_log_format_32 {
__uint16_t efi_type; /* efi log item type */
__uint16_t efi_size; /* size of this item */
__uint32_t efi_nextents; /* # extents to free */
__uint64_t efi_id; /* efi identifier */
uint16_t efi_type; /* efi log item type */
uint16_t efi_size; /* size of this item */
uint32_t efi_nextents; /* # extents to free */
uint64_t efi_id; /* efi identifier */
xfs_extent_32_t efi_extents[1]; /* array of extents to free */
} __attribute__((packed)) xfs_efi_log_format_32_t;

typedef struct xfs_efi_log_format_64 {
__uint16_t efi_type; /* efi log item type */
__uint16_t efi_size; /* size of this item */
__uint32_t efi_nextents; /* # extents to free */
__uint64_t efi_id; /* efi identifier */
uint16_t efi_type; /* efi log item type */
uint16_t efi_size; /* size of this item */
uint32_t efi_nextents; /* # extents to free */
uint64_t efi_id; /* efi identifier */
xfs_extent_64_t efi_extents[1]; /* array of extents to free */
} xfs_efi_log_format_64_t;

@@ -599,26 +599,26 @@ typedef struct xfs_efi_log_format_64 {
* size is given by efd_nextents;
*/
typedef struct xfs_efd_log_format {
__uint16_t efd_type; /* efd log item type */
__uint16_t efd_size; /* size of this item */
__uint32_t efd_nextents; /* # of extents freed */
__uint64_t efd_efi_id; /* id of corresponding efi */
uint16_t efd_type; /* efd log item type */
uint16_t efd_size; /* size of this item */
uint32_t efd_nextents; /* # of extents freed */
uint64_t efd_efi_id; /* id of corresponding efi */
xfs_extent_t efd_extents[1]; /* array of extents freed */
} xfs_efd_log_format_t;

typedef struct xfs_efd_log_format_32 {
__uint16_t efd_type; /* efd log item type */
__uint16_t efd_size; /* size of this item */
__uint32_t efd_nextents; /* # of extents freed */
__uint64_t efd_efi_id; /* id of corresponding efi */
uint16_t efd_type; /* efd log item type */
uint16_t efd_size; /* size of this item */
uint32_t efd_nextents; /* # of extents freed */
uint64_t efd_efi_id; /* id of corresponding efi */
xfs_extent_32_t efd_extents[1]; /* array of extents freed */
} __attribute__((packed)) xfs_efd_log_format_32_t;

typedef struct xfs_efd_log_format_64 {
__uint16_t efd_type; /* efd log item type */
__uint16_t efd_size; /* size of this item */
__uint32_t efd_nextents; /* # of extents freed */
__uint64_t efd_efi_id; /* id of corresponding efi */
uint16_t efd_type; /* efd log item type */
uint16_t efd_size; /* size of this item */
uint32_t efd_nextents; /* # of extents freed */
uint64_t efd_efi_id; /* id of corresponding efi */
xfs_extent_64_t efd_extents[1]; /* array of extents freed */
} xfs_efd_log_format_64_t;

@@ -626,11 +626,11 @@ typedef struct xfs_efd_log_format_64 {
* RUI/RUD (reverse mapping) log format definitions
*/
struct xfs_map_extent {
__uint64_t me_owner;
__uint64_t me_startblock;
__uint64_t me_startoff;
__uint32_t me_len;
__uint32_t me_flags;
uint64_t me_owner;
uint64_t me_startblock;
uint64_t me_startoff;
uint32_t me_len;
uint32_t me_flags;
};

/* rmap me_flags: upper bits are flags, lower byte is type code */
@@ -659,10 +659,10 @@ struct xfs_map_extent {
* size is given by rui_nextents.
*/
struct xfs_rui_log_format {
__uint16_t rui_type; /* rui log item type */
__uint16_t rui_size; /* size of this item */
__uint32_t rui_nextents; /* # extents to free */
__uint64_t rui_id; /* rui identifier */
uint16_t rui_type; /* rui log item type */
uint16_t rui_size; /* size of this item */
uint32_t rui_nextents; /* # extents to free */
uint64_t rui_id; /* rui identifier */
struct xfs_map_extent rui_extents[]; /* array of extents to rmap */
};

@@ -680,19 +680,19 @@ xfs_rui_log_format_sizeof(
* size is given by rud_nextents;
*/
struct xfs_rud_log_format {
__uint16_t rud_type; /* rud log item type */
__uint16_t rud_size; /* size of this item */
__uint32_t __pad;
__uint64_t rud_rui_id; /* id of corresponding rui */
uint16_t rud_type; /* rud log item type */
uint16_t rud_size; /* size of this item */
uint32_t __pad;
uint64_t rud_rui_id; /* id of corresponding rui */
};

/*
* CUI/CUD (refcount update) log format definitions
*/
struct xfs_phys_extent {
__uint64_t pe_startblock;
__uint32_t pe_len;
__uint32_t pe_flags;
uint64_t pe_startblock;
uint32_t pe_len;
uint32_t pe_flags;
};

/* refcount pe_flags: upper bits are flags, lower byte is type code */
@@ -707,10 +707,10 @@ struct xfs_phys_extent {
* size is given by cui_nextents.
*/
struct xfs_cui_log_format {
__uint16_t cui_type; /* cui log item type */
__uint16_t cui_size; /* size of this item */
__uint32_t cui_nextents; /* # extents to free */
__uint64_t cui_id; /* cui identifier */
uint16_t cui_type; /* cui log item type */
uint16_t cui_size; /* size of this item */
uint32_t cui_nextents; /* # extents to free */
uint64_t cui_id; /* cui identifier */
struct xfs_phys_extent cui_extents[]; /* array of extents */
};

@@ -728,10 +728,10 @@ xfs_cui_log_format_sizeof(
* size is given by cud_nextents;
*/
struct xfs_cud_log_format {
__uint16_t cud_type; /* cud log item type */
__uint16_t cud_size; /* size of this item */
__uint32_t __pad;
__uint64_t cud_cui_id; /* id of corresponding cui */
uint16_t cud_type; /* cud log item type */
uint16_t cud_size; /* size of this item */
uint32_t __pad;
uint64_t cud_cui_id; /* id of corresponding cui */
};

/*
@@ -755,10 +755,10 @@ struct xfs_cud_log_format {
* size is given by bui_nextents.
*/
struct xfs_bui_log_format {
__uint16_t bui_type; /* bui log item type */
__uint16_t bui_size; /* size of this item */
__uint32_t bui_nextents; /* # extents to free */
__uint64_t bui_id; /* bui identifier */
uint16_t bui_type; /* bui log item type */
uint16_t bui_size; /* size of this item */
uint32_t bui_nextents; /* # extents to free */
uint64_t bui_id; /* bui identifier */
struct xfs_map_extent bui_extents[]; /* array of extents to bmap */
};

@@ -776,10 +776,10 @@ xfs_bui_log_format_sizeof(
* size is given by bud_nextents;
*/
struct xfs_bud_log_format {
__uint16_t bud_type; /* bud log item type */
__uint16_t bud_size; /* size of this item */
__uint32_t __pad;
__uint64_t bud_bui_id; /* id of corresponding bui */
uint16_t bud_type; /* bud log item type */
uint16_t bud_size; /* size of this item */
uint32_t __pad;
uint64_t bud_bui_id; /* id of corresponding bui */
};

/*
@@ -789,12 +789,12 @@ struct xfs_bud_log_format {
* 32 bits : log_recovery code assumes that.
*/
typedef struct xfs_dq_logformat {
__uint16_t qlf_type; /* dquot log item type */
__uint16_t qlf_size; /* size of this item */
uint16_t qlf_type; /* dquot log item type */
uint16_t qlf_size; /* size of this item */
xfs_dqid_t qlf_id; /* usr/grp/proj id : 32 bits */
__int64_t qlf_blkno; /* blkno of dquot buffer */
__int32_t qlf_len; /* len of dquot buffer */
__uint32_t qlf_boffset; /* off of dquot in buffer */
int64_t qlf_blkno; /* blkno of dquot buffer */
int32_t qlf_len; /* len of dquot buffer */
uint32_t qlf_boffset; /* off of dquot in buffer */
} xfs_dq_logformat_t;

/*
@@ -853,8 +853,8 @@ typedef struct xfs_qoff_logformat {
* decoding can be done correctly.
*/
struct xfs_icreate_log {
__uint16_t icl_type; /* type of log format structure */
__uint16_t icl_size; /* size of log format structure */
uint16_t icl_type; /* type of log format structure */
uint16_t icl_size; /* size of log format structure */
__be32 icl_ag; /* ag being allocated in */
__be32 icl_agbno; /* start block of inode range */
__be32 icl_count; /* number of inodes to initialise */
@@ -26,7 +26,7 @@
#define XLOG_RHASH_SIZE 16
#define XLOG_RHASH_SHIFT 2
#define XLOG_RHASH(tid) \
((((__uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1))
((((uint32_t)tid)>>XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE-1))

#define XLOG_MAX_REGIONS_IN_ITEM (XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK / 2 + 1)
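A quick standalone check of the XLOG_RHASH() bucket computation above, re-declaring the macro and hashing an arbitrary transaction id into one of the 16 recovery hash buckets:

#include <stdint.h>
#include <stdio.h>

#define XLOG_RHASH_SIZE		16
#define XLOG_RHASH_SHIFT	2
#define XLOG_RHASH(tid) \
	((((uint32_t)tid) >> XLOG_RHASH_SHIFT) & (XLOG_RHASH_SIZE - 1))

int main(void)
{
	/* 0x1234 >> 2 = 0x48d; 0x48d & 0xf = 0xd, i.e. bucket 13 */
	printf("%u\n", XLOG_RHASH(0x1234));
	return 0;
}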
@@ -27,8 +27,8 @@
* they may need 64-bit accounting. Hence, 64-bit quota-counters,
* and quota-limits. This is a waste in the common case, but hey ...
*/
typedef __uint64_t xfs_qcnt_t;
typedef __uint16_t xfs_qwarncnt_t;
typedef uint64_t xfs_qcnt_t;
typedef uint16_t xfs_qwarncnt_t;

/*
* flags for q_flags field in the dquot.
@@ -136,6 +136,8 @@ typedef __uint16_t xfs_qwarncnt_t;
*/
#define XFS_QMOPT_INHERIT 0x1000000

#define XFS_QMOPT_NOLOCK 0x2000000 /* don't ilock during dqget */

/*
* flags to xfs_trans_mod_dquot.
*/
@@ -784,14 +784,6 @@ xfs_refcount_merge_extents(
}

/*
* While we're adjusting the refcounts records of an extent, we have
* to keep an eye on the number of extents we're dirtying -- run too
* many in a single transaction and we'll exceed the transaction's
* reservation and crash the fs. Each record adds 12 bytes to the
* log (plus any key updates) so we'll conservatively assume 24 bytes
* per record. We must also leave space for btree splits on both ends
* of the range and space for the CUD and a new CUI.
*
* XXX: This is a pretty hand-wavy estimate. The penalty for guessing
* true incorrectly is a shutdown FS; the penalty for guessing false
* incorrectly is more transaction rolls than might be necessary.
@@ -813,8 +805,7 @@ xfs_refcount_still_have_space(
*/
if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
XFS_TEST_ERROR(false, cur->bc_mp,
XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE,
XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE))
XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
return false;

if (cur->bc_private.a.priv.refc.nr_ops == 0)
@@ -822,7 +813,7 @@ xfs_refcount_still_have_space(
else if (overhead > cur->bc_tp->t_log_res)
return false;
return cur->bc_tp->t_log_res - overhead >
cur->bc_private.a.priv.refc.nr_ops * 32;
cur->bc_private.a.priv.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}

/*
@@ -1076,8 +1067,7 @@ xfs_refcount_finish_one(
blockcount);

if (XFS_TEST_ERROR(false, mp,
XFS_ERRTAG_REFCOUNT_FINISH_ONE,
XFS_RANDOM_REFCOUNT_FINISH_ONE))
XFS_ERRTAG_REFCOUNT_FINISH_ONE))
return -EIO;

/*
@@ -67,4 +67,20 @@ extern int xfs_refcount_free_cow_extent(struct xfs_mount *mp,
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
xfs_agnumber_t agno);

/*
* While we're adjusting the refcounts records of an extent, we have
* to keep an eye on the number of extents we're dirtying -- run too
* many in a single transaction and we'll exceed the transaction's
* reservation and crash the fs. Each record adds 12 bytes to the
* log (plus any key updates) so we'll conservatively assume 32 bytes
* per record. We must also leave space for btree splits on both ends
* of the range and space for the CUD and a new CUI.
*/
#define XFS_REFCOUNT_ITEM_OVERHEAD 32

static inline xfs_fileoff_t xfs_refcount_max_unmap(int log_res)
{
return (log_res * 3 / 4) / XFS_REFCOUNT_ITEM_OVERHEAD;
}

#endif /* __XFS_REFCOUNT_H__ */
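xfs_refcount_max_unmap() above budgets deferred refcount work against the transaction's log reservation: roughly three quarters of the reservation divided by the conservative 32-byte-per-record XFS_REFCOUNT_ITEM_OVERHEAD. A standalone re-evaluation of the formula with a purely hypothetical reservation size:

#include <stdio.h>

#define REFCOUNT_ITEM_OVERHEAD	32	/* mirrors XFS_REFCOUNT_ITEM_OVERHEAD */

static long max_unmap(int log_res)
{
	return (long)(log_res * 3 / 4) / REFCOUNT_ITEM_OVERHEAD;
}

int main(void)
{
	/* a hypothetical 100000-byte reservation: (100000 * 3 / 4) / 32 = 2343 */
	printf("%ld\n", max_unmap(100000));
	return 0;
}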
@@ -202,7 +202,7 @@ xfs_refcountbt_init_ptr_from_cur(
ptr->s = agf->agf_refcount_root;
}

STATIC __int64_t
STATIC int64_t
xfs_refcountbt_key_diff(
struct xfs_btree_cur *cur,
union xfs_btree_key *key)
@@ -210,16 +210,16 @@ xfs_refcountbt_key_diff(
struct xfs_refcount_irec *rec = &cur->bc_rec.rc;
struct xfs_refcount_key *kp = &key->refc;

return (__int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

STATIC __int64_t
STATIC int64_t
xfs_refcountbt_diff_two_keys(
struct xfs_btree_cur *cur,
union xfs_btree_key *k1,
union xfs_btree_key *k2)
{
return (__int64_t)be32_to_cpu(k1->refc.rc_startblock) -
return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
be32_to_cpu(k2->refc.rc_startblock);
}

@@ -285,7 +285,6 @@ const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
.verify_write = xfs_refcountbt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
xfs_refcountbt_keys_inorder(
struct xfs_btree_cur *cur,
@@ -306,7 +305,6 @@ xfs_refcountbt_recs_inorder(
be32_to_cpu(r1->refc.rc_blockcount) <=
be32_to_cpu(r2->refc.rc_startblock);
}
#endif

static const struct xfs_btree_ops xfs_refcountbt_ops = {
.rec_len = sizeof(struct xfs_refcount_rec),
@@ -325,10 +323,8 @@ static const struct xfs_btree_ops xfs_refcountbt_ops = {
.key_diff = xfs_refcountbt_key_diff,
.buf_ops = &xfs_refcountbt_buf_ops,
.diff_two_keys = xfs_refcountbt_diff_two_keys,
#if defined(DEBUG) || defined(XFS_WARN)
.keys_inorder = xfs_refcountbt_keys_inorder,
.recs_inorder = xfs_refcountbt_recs_inorder,
#endif
};

/*
@@ -179,7 +179,8 @@ xfs_rmap_delete(
return error;
}

static int
/* Convert an internal btree record to an rmap record. */
int
xfs_rmap_btrec_to_irec(
union xfs_btree_rec *rec,
struct xfs_rmap_irec *irec)
@@ -2061,7 +2062,7 @@ int
xfs_rmap_finish_one(
struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
__uint64_t owner,
uint64_t owner,
int whichfork,
xfs_fileoff_t startoff,
xfs_fsblock_t startblock,
@@ -2086,8 +2087,7 @@ xfs_rmap_finish_one(
startoff, blockcount, state);

if (XFS_TEST_ERROR(false, mp,
XFS_ERRTAG_RMAP_FINISH_ONE,
XFS_RANDOM_RMAP_FINISH_ONE))
XFS_ERRTAG_RMAP_FINISH_ONE))
return -EIO;

/*
@@ -2182,7 +2182,7 @@ __xfs_rmap_add(
struct xfs_mount *mp,
struct xfs_defer_ops *dfops,
enum xfs_rmap_intent_type type,
__uint64_t owner,
uint64_t owner,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
@@ -2266,7 +2266,7 @@ xfs_rmap_alloc_extent(
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
__uint64_t owner)
uint64_t owner)
{
struct xfs_bmbt_irec bmap;

@@ -2290,7 +2290,7 @@ xfs_rmap_free_extent(
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
__uint64_t owner)
uint64_t owner)
{
struct xfs_bmbt_irec bmap;
@ -179,7 +179,7 @@ enum xfs_rmap_intent_type {
|
|||
struct xfs_rmap_intent {
|
||||
struct list_head ri_list;
|
||||
enum xfs_rmap_intent_type ri_type;
|
||||
__uint64_t ri_owner;
|
||||
uint64_t ri_owner;
|
||||
int ri_whichfork;
|
||||
struct xfs_bmbt_irec ri_bmap;
|
||||
};
|
||||
|
@ -196,15 +196,15 @@ int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
|
|||
struct xfs_bmbt_irec *imap);
|
||||
int xfs_rmap_alloc_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
|
||||
xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
|
||||
__uint64_t owner);
|
||||
uint64_t owner);
|
||||
int xfs_rmap_free_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
|
||||
xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
|
||||
__uint64_t owner);
|
||||
uint64_t owner);
|
||||
|
||||
void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
|
||||
struct xfs_btree_cur *rcur, int error);
|
||||
int xfs_rmap_finish_one(struct xfs_trans *tp, enum xfs_rmap_intent_type type,
|
||||
__uint64_t owner, int whichfork, xfs_fileoff_t startoff,
|
||||
uint64_t owner, int whichfork, xfs_fileoff_t startoff,
|
||||
xfs_fsblock_t startblock, xfs_filblks_t blockcount,
|
||||
xfs_exntst_t state, struct xfs_btree_cur **pcur);
|
||||
|
||||
|
@ -216,5 +216,8 @@ int xfs_rmap_lookup_le_range(struct xfs_btree_cur *cur, xfs_agblock_t bno,
|
|||
struct xfs_rmap_irec *irec, int *stat);
|
||||
int xfs_rmap_compare(const struct xfs_rmap_irec *a,
|
||||
const struct xfs_rmap_irec *b);
|
||||
union xfs_btree_rec;
|
||||
int xfs_rmap_btrec_to_irec(union xfs_btree_rec *rec,
|
||||
struct xfs_rmap_irec *irec);
|
||||
|
||||
#endif /* __XFS_RMAP_H__ */
|
||||
|
|
|
@ -199,7 +199,7 @@ xfs_rmapbt_init_high_key_from_rec(
|
|||
union xfs_btree_key *key,
|
||||
union xfs_btree_rec *rec)
|
||||
{
|
||||
__uint64_t off;
|
||||
uint64_t off;
|
||||
int adj;
|
||||
|
||||
adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
|
||||
|
@ -241,7 +241,7 @@ xfs_rmapbt_init_ptr_from_cur(
|
|||
ptr->s = agf->agf_roots[cur->bc_btnum];
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_rmapbt_key_diff(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *key)
|
||||
|
@ -249,9 +249,9 @@ xfs_rmapbt_key_diff(
|
|||
struct xfs_rmap_irec *rec = &cur->bc_rec.r;
|
||||
struct xfs_rmap_key *kp = &key->rmap;
|
||||
__u64 x, y;
|
||||
__int64_t d;
|
||||
int64_t d;
|
||||
|
||||
d = (__int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
|
||||
d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
|
||||
if (d)
|
||||
return d;
|
||||
|
||||
|
@ -271,7 +271,7 @@ xfs_rmapbt_key_diff(
|
|||
return 0;
|
||||
}
|
||||
|
||||
STATIC __int64_t
|
||||
STATIC int64_t
|
||||
xfs_rmapbt_diff_two_keys(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *k1,
|
||||
|
@ -279,10 +279,10 @@ xfs_rmapbt_diff_two_keys(
|
|||
{
|
||||
struct xfs_rmap_key *kp1 = &k1->rmap;
|
||||
struct xfs_rmap_key *kp2 = &k2->rmap;
|
||||
__int64_t d;
|
||||
int64_t d;
|
||||
__u64 x, y;
|
||||
|
||||
d = (__int64_t)be32_to_cpu(kp1->rm_startblock) -
|
||||
d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
|
||||
be32_to_cpu(kp2->rm_startblock);
|
||||
if (d)
|
||||
return d;
|
||||
|
@ -377,17 +377,16 @@ const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
|
|||
.verify_write = xfs_rmapbt_write_verify,
|
||||
};
|
||||
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
STATIC int
|
||||
xfs_rmapbt_keys_inorder(
|
||||
struct xfs_btree_cur *cur,
|
||||
union xfs_btree_key *k1,
|
||||
union xfs_btree_key *k2)
|
||||
{
|
||||
__uint32_t x;
|
||||
__uint32_t y;
|
||||
__uint64_t a;
|
||||
__uint64_t b;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
uint64_t a;
|
||||
uint64_t b;
|
||||
|
||||
x = be32_to_cpu(k1->rmap.rm_startblock);
|
||||
y = be32_to_cpu(k2->rmap.rm_startblock);
|
||||
|
@ -414,10 +413,10 @@ xfs_rmapbt_recs_inorder(
|
|||
union xfs_btree_rec *r1,
|
||||
union xfs_btree_rec *r2)
|
||||
{
|
||||
__uint32_t x;
|
||||
__uint32_t y;
|
||||
__uint64_t a;
|
||||
__uint64_t b;
|
||||
uint32_t x;
|
||||
uint32_t y;
|
||||
uint64_t a;
|
||||
uint64_t b;
|
||||
|
||||
x = be32_to_cpu(r1->rmap.rm_startblock);
|
||||
y = be32_to_cpu(r2->rmap.rm_startblock);
|
||||
|
@ -437,7 +436,6 @@ xfs_rmapbt_recs_inorder(
|
|||
return 1;
|
||||
return 0;
|
||||
}
|
||||
#endif /* DEBUG */
|
||||
|
||||
static const struct xfs_btree_ops xfs_rmapbt_ops = {
|
||||
.rec_len = sizeof(struct xfs_rmap_rec),
|
||||
|
@ -456,10 +454,8 @@ static const struct xfs_btree_ops xfs_rmapbt_ops = {
|
|||
.key_diff = xfs_rmapbt_key_diff,
|
||||
.buf_ops = &xfs_rmapbt_buf_ops,
|
||||
.diff_two_keys = xfs_rmapbt_diff_two_keys,
|
||||
#if defined(DEBUG) || defined(XFS_WARN)
|
||||
.keys_inorder = xfs_rmapbt_keys_inorder,
|
||||
.recs_inorder = xfs_rmapbt_recs_inorder,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -70,7 +70,7 @@ const struct xfs_buf_ops xfs_rtbuf_ops = {
|
|||
* Get a buffer for the bitmap or summary file block specified.
|
||||
* The buffer is returned read and locked.
|
||||
*/
|
||||
static int
|
||||
int
|
||||
xfs_rtbuf_get(
|
||||
xfs_mount_t *mp, /* file system mount structure */
|
||||
xfs_trans_t *tp, /* transaction pointer */
|
||||
|
@ -1011,7 +1011,7 @@ xfs_rtfree_extent(
|
|||
mp->m_sb.sb_rextents) {
|
||||
if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
|
||||
mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
|
||||
*(__uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
|
||||
*(uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
|
||||
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -448,7 +448,7 @@ xfs_sb_quota_to_disk(
|
|||
struct xfs_dsb *to,
|
||||
struct xfs_sb *from)
|
||||
{
|
||||
__uint16_t qflags = from->sb_qflags;
|
||||
uint16_t qflags = from->sb_qflags;
|
||||
|
||||
to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
|
||||
if (xfs_sb_version_has_pquotino(from)) {
|
||||
|
@ -756,7 +756,7 @@ xfs_sb_mount_common(
|
|||
mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2;
|
||||
|
||||
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
|
||||
mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
|
||||
mp->m_ialloc_inos = (int)MAX((uint16_t)XFS_INODES_PER_CHUNK,
|
||||
sbp->sb_inopblock);
|
||||
mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
|
||||
|
||||
|
|
|
@@ -114,7 +114,7 @@ xfs_symlink_verify(
 	if (bp->b_bn != be64_to_cpu(dsl->sl_blkno))
 		return false;
 	if (be32_to_cpu(dsl->sl_offset) +
-			be32_to_cpu(dsl->sl_bytes) >= MAXPATHLEN)
+			be32_to_cpu(dsl->sl_bytes) >= XFS_SYMLINK_MAXLEN)
 		return false;
 	if (dsl->sl_owner == 0)
 		return false;
|
|
|
@@ -477,14 +477,14 @@ xfs_calc_mkdir_reservation(
 /*
  * Making a new symlink is the same as creating a new file, but
  * with the added blocks for remote symlink data which can be up to 1kB in
- * length (MAXPATHLEN).
+ * length (XFS_SYMLINK_MAXLEN).
  */
 STATIC uint
 xfs_calc_symlink_reservation(
 	struct xfs_mount	*mp)
 {
 	return xfs_calc_create_reservation(mp) +
-		xfs_calc_buf_res(1, MAXPATHLEN);
+		xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
 }
 
 /*
|
|
|
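Editorial note, not part of the diff: MAXPATHLEN is a path-length limit (typically 4096 on Linux) and says nothing about the on-disk symlink format, whose target is capped at XFS_SYMLINK_MAXLEN (1024 bytes, the "1kB" in the comment). Both the verifier and the reservation above now use the on-disk constant. A toy sketch of the reservation shape; the helper values below are invented for illustration only:

/* sketch only; numbers are made up */
#include <stdio.h>

#define SYMLINK_MAXLEN	1024	/* on-disk cap, as in XFS_SYMLINK_MAXLEN */

static unsigned int calc_create_res(void) { return 180000; }
static unsigned int calc_buf_res(unsigned int nbufs, unsigned int size)
{
	return nbufs * (size + 128);	/* payload plus an assumed per-buffer log header */
}

int main(void)
{
	/* same shape as xfs_calc_symlink_reservation(): create + one remote block */
	printf("symlink reservation: %u bytes\n",
	       calc_create_res() + calc_buf_res(1, SYMLINK_MAXLEN));
	return 0;
}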
@@ -18,34 +18,34 @@
 #ifndef __XFS_TYPES_H__
 #define __XFS_TYPES_H__
 
-typedef __uint32_t	prid_t;		/* project ID */
+typedef uint32_t	prid_t;		/* project ID */
 
-typedef __uint32_t	xfs_agblock_t;	/* blockno in alloc. group */
-typedef __uint32_t	xfs_agino_t;	/* inode # within allocation grp */
-typedef __uint32_t	xfs_extlen_t;	/* extent length in blocks */
-typedef __uint32_t	xfs_agnumber_t;	/* allocation group number */
-typedef __int32_t	xfs_extnum_t;	/* # of extents in a file */
-typedef __int16_t	xfs_aextnum_t;	/* # extents in an attribute fork */
-typedef __int64_t	xfs_fsize_t;	/* bytes in a file */
-typedef __uint64_t	xfs_ufsize_t;	/* unsigned bytes in a file */
+typedef uint32_t	xfs_agblock_t;	/* blockno in alloc. group */
+typedef uint32_t	xfs_agino_t;	/* inode # within allocation grp */
+typedef uint32_t	xfs_extlen_t;	/* extent length in blocks */
+typedef uint32_t	xfs_agnumber_t;	/* allocation group number */
+typedef int32_t		xfs_extnum_t;	/* # of extents in a file */
+typedef int16_t		xfs_aextnum_t;	/* # extents in an attribute fork */
+typedef int64_t		xfs_fsize_t;	/* bytes in a file */
+typedef uint64_t	xfs_ufsize_t;	/* unsigned bytes in a file */
 
-typedef __int32_t	xfs_suminfo_t;	/* type of bitmap summary info */
-typedef __int32_t	xfs_rtword_t;	/* word type for bitmap manipulations */
+typedef int32_t		xfs_suminfo_t;	/* type of bitmap summary info */
+typedef int32_t		xfs_rtword_t;	/* word type for bitmap manipulations */
 
-typedef __int64_t	xfs_lsn_t;	/* log sequence number */
-typedef __int32_t	xfs_tid_t;	/* transaction identifier */
+typedef int64_t		xfs_lsn_t;	/* log sequence number */
+typedef int32_t		xfs_tid_t;	/* transaction identifier */
 
-typedef __uint32_t	xfs_dablk_t;	/* dir/attr block number (in file) */
-typedef __uint32_t	xfs_dahash_t;	/* dir/attr hash value */
+typedef uint32_t	xfs_dablk_t;	/* dir/attr block number (in file) */
+typedef uint32_t	xfs_dahash_t;	/* dir/attr hash value */
 
-typedef __uint64_t	xfs_fsblock_t;	/* blockno in filesystem (agno|agbno) */
-typedef __uint64_t	xfs_rfsblock_t;	/* blockno in filesystem (raw) */
-typedef __uint64_t	xfs_rtblock_t;	/* extent (block) in realtime area */
-typedef __uint64_t	xfs_fileoff_t;	/* block number in a file */
-typedef __uint64_t	xfs_filblks_t;	/* number of blocks in a file */
+typedef uint64_t	xfs_fsblock_t;	/* blockno in filesystem (agno|agbno) */
+typedef uint64_t	xfs_rfsblock_t;	/* blockno in filesystem (raw) */
+typedef uint64_t	xfs_rtblock_t;	/* extent (block) in realtime area */
+typedef uint64_t	xfs_fileoff_t;	/* block number in a file */
+typedef uint64_t	xfs_filblks_t;	/* number of blocks in a file */
 
-typedef __int64_t	xfs_srtblock_t;	/* signed version of xfs_rtblock_t */
-typedef __int64_t	xfs_sfiloff_t;	/* signed block number in a file */
+typedef int64_t		xfs_srtblock_t;	/* signed version of xfs_rtblock_t */
+typedef int64_t		xfs_sfiloff_t;	/* signed block number in a file */
 
 /*
  * Null values for the types.
@@ -125,7 +125,7 @@ struct xfs_name {
  * uid_t and gid_t are hard-coded to 32 bits in the inode.
  * Hence, an 'id' in a dquot is 32 bits..
  */
-typedef __uint32_t	xfs_dqid_t;
+typedef uint32_t	xfs_dqid_t;
 
 /*
  * Constants for bit manipulations.
|
|
|
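Editorial note, not part of the diff: the hunk above only changes spelling, from the old double-underscore typedefs to the standard fixed-width types, so every on-disk type keeps its exact width. A trivial sketch of the invariant being preserved:

/* sketch: the new spellings are ordinary C99/kernel fixed-width types */
#include <stdint.h>

typedef uint32_t xfs_agblock_t;	/* blockno in alloc. group */
typedef uint64_t xfs_fsblock_t;	/* blockno in filesystem */
typedef int64_t  xfs_lsn_t;	/* log sequence number */

_Static_assert(sizeof(xfs_agblock_t) == 4, "agblock stays 32 bits wide");
_Static_assert(sizeof(xfs_fsblock_t) == 8, "fsblock stays 64 bits wide");
_Static_assert(sizeof(xfs_lsn_t) == 8, "lsn stays 64 bits wide");

int main(void) { return 0; }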
@@ -24,6 +24,10 @@
 #define XFS_BUF_LOCK_TRACKING 1
 #endif
 
+#ifdef CONFIG_XFS_ASSERT_FATAL
+#define XFS_ASSERT_FATAL 1
+#endif
+
 #ifdef CONFIG_XFS_WARN
 #define XFS_WARN 1
 #endif
|
|
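Editorial note, not part of the diff: the new CONFIG_XFS_ASSERT_FATAL switch is what lets "admin can configure if ASSERT is fatal" work, with a runtime knob added elsewhere in this series. A hedged userspace sketch of the general pattern (the flag name and wiring below are illustrative, not XFS's):

/* sketch only */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool bug_on_assert = true;	/* imagine this toggled via a sysfs knob */

#define MY_ASSERT(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "Assertion failed: %s\n", #expr); \
			if (bug_on_assert)				\
				abort();	/* fatal path */	\
		}							\
	} while (0)

int main(void)
{
	bug_on_assert = false;		/* warn-only mode */
	MY_ASSERT(1 + 1 == 3);		/* logs a warning and keeps running */
	return 0;
}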
|
@ -170,8 +170,8 @@ xfs_get_acl(struct inode *inode, int type)
|
|||
return acl;
|
||||
}
|
||||
|
||||
STATIC int
|
||||
__xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
|
||||
int
|
||||
__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
unsigned char *ea_name;
|
||||
|
@ -268,5 +268,5 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
|
|||
}
|
||||
|
||||
set_acl:
|
||||
return __xfs_set_acl(inode, type, acl);
|
||||
return __xfs_set_acl(inode, acl, type);
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ struct posix_acl;
|
|||
#ifdef CONFIG_XFS_POSIX_ACL
|
||||
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
|
||||
extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
|
||||
extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
|
||||
#else
|
||||
static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
|
||||
{
|
||||
|
|
|
@ -839,7 +839,7 @@ xfs_writepage_map(
|
|||
struct inode *inode,
|
||||
struct page *page,
|
||||
loff_t offset,
|
||||
__uint64_t end_offset)
|
||||
uint64_t end_offset)
|
||||
{
|
||||
LIST_HEAD(submit_list);
|
||||
struct xfs_ioend *ioend, *next;
|
||||
|
@ -994,7 +994,7 @@ xfs_do_writepage(
|
|||
struct xfs_writepage_ctx *wpc = data;
|
||||
struct inode *inode = page->mapping->host;
|
||||
loff_t offset;
|
||||
__uint64_t end_offset;
|
||||
uint64_t end_offset;
|
||||
pgoff_t end_index;
|
||||
|
||||
trace_xfs_writepage(inode, page, 0, 0);
|
||||
|
|
|
@ -117,6 +117,7 @@ typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
|
|||
unsigned char *, int, int);
|
||||
|
||||
typedef struct xfs_attr_list_context {
|
||||
struct xfs_trans *tp;
|
||||
struct xfs_inode *dp; /* inode */
|
||||
struct attrlist_cursor_kern *cursor; /* position in list */
|
||||
char *alist; /* output buffer */
|
||||
|
@ -140,8 +141,10 @@ typedef struct xfs_attr_list_context {
|
|||
* Overall external interface routines.
|
||||
*/
|
||||
int xfs_attr_inactive(struct xfs_inode *dp);
|
||||
int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
|
||||
int xfs_attr_list_int(struct xfs_attr_list_context *);
|
||||
int xfs_inode_hasattr(struct xfs_inode *ip);
|
||||
int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
|
||||
int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
|
||||
unsigned char *value, int *valuelenp, int flags);
|
||||
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
|
||||
|
|
|
@ -230,7 +230,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
*/
|
||||
bp = NULL;
|
||||
if (cursor->blkno > 0) {
|
||||
error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
|
||||
error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
|
||||
&bp, XFS_ATTR_FORK);
|
||||
if ((error != 0) && (error != -EFSCORRUPTED))
|
||||
return error;
|
||||
|
@ -242,7 +242,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
case XFS_DA_NODE_MAGIC:
|
||||
case XFS_DA3_NODE_MAGIC:
|
||||
trace_xfs_attr_list_wrong_blk(context);
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
bp = NULL;
|
||||
break;
|
||||
case XFS_ATTR_LEAF_MAGIC:
|
||||
|
@ -254,18 +254,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
if (cursor->hashval > be32_to_cpu(
|
||||
entries[leafhdr.count - 1].hashval)) {
|
||||
trace_xfs_attr_list_wrong_blk(context);
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
bp = NULL;
|
||||
} else if (cursor->hashval <= be32_to_cpu(
|
||||
entries[0].hashval)) {
|
||||
trace_xfs_attr_list_wrong_blk(context);
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
bp = NULL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
trace_xfs_attr_list_wrong_blk(context);
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
bp = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -279,9 +279,9 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
if (bp == NULL) {
|
||||
cursor->blkno = 0;
|
||||
for (;;) {
|
||||
__uint16_t magic;
|
||||
uint16_t magic;
|
||||
|
||||
error = xfs_da3_node_read(NULL, dp,
|
||||
error = xfs_da3_node_read(context->tp, dp,
|
||||
cursor->blkno, -1, &bp,
|
||||
XFS_ATTR_FORK);
|
||||
if (error)
|
||||
|
@ -297,7 +297,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
XFS_ERRLEVEL_LOW,
|
||||
context->dp->i_mount,
|
||||
node);
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
|
@ -313,10 +313,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
}
|
||||
}
|
||||
if (i == nodehdr.count) {
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
return 0;
|
||||
}
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
}
|
||||
}
|
||||
ASSERT(bp != NULL);
|
||||
|
@ -333,12 +333,12 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
|
|||
if (context->seen_enough || leafhdr.forw == 0)
|
||||
break;
|
||||
cursor->blkno = leafhdr.forw;
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno, -1, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -448,15 +448,33 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
|
|||
trace_xfs_attr_leaf_list(context);
|
||||
|
||||
context->cursor->blkno = 0;
|
||||
error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
|
||||
error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
xfs_attr3_leaf_list_int(bp, context);
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(context->tp, bp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_attr_list_int_ilocked(
|
||||
struct xfs_attr_list_context *context)
|
||||
{
|
||||
struct xfs_inode *dp = context->dp;
|
||||
|
||||
/*
|
||||
* Decide on what work routines to call based on the inode size.
|
||||
*/
|
||||
if (!xfs_inode_hasattr(dp))
|
||||
return 0;
|
||||
else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
|
||||
return xfs_attr_shortform_list(context);
|
||||
else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
|
||||
return xfs_attr_leaf_list(context);
|
||||
return xfs_attr_node_list(context);
|
||||
}
|
||||
|
||||
int
|
||||
xfs_attr_list_int(
|
||||
xfs_attr_list_context_t *context)
|
||||
|
@ -470,19 +488,8 @@ xfs_attr_list_int(
|
|||
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* Decide on what work routines to call based on the inode size.
|
||||
*/
|
||||
lock_mode = xfs_ilock_attr_map_shared(dp);
|
||||
if (!xfs_inode_hasattr(dp)) {
|
||||
error = 0;
|
||||
} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
|
||||
error = xfs_attr_shortform_list(context);
|
||||
} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
|
||||
error = xfs_attr_leaf_list(context);
|
||||
} else {
|
||||
error = xfs_attr_node_list(context);
|
||||
}
|
||||
error = xfs_attr_list_int_ilocked(context);
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
return error;
|
||||
}
|
||||
|
|
|
@ -396,6 +396,7 @@ xfs_bui_recover(
|
|||
struct xfs_map_extent *bmap;
|
||||
xfs_fsblock_t startblock_fsb;
|
||||
xfs_fsblock_t inode_fsb;
|
||||
xfs_filblks_t count;
|
||||
bool op_ok;
|
||||
struct xfs_bud_log_item *budp;
|
||||
enum xfs_bmap_intent_type type;
|
||||
|
@ -404,6 +405,7 @@ xfs_bui_recover(
|
|||
struct xfs_trans *tp;
|
||||
struct xfs_inode *ip = NULL;
|
||||
struct xfs_defer_ops dfops;
|
||||
struct xfs_bmbt_irec irec;
|
||||
xfs_fsblock_t firstfsb;
|
||||
|
||||
ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
|
||||
|
@ -481,13 +483,24 @@ xfs_bui_recover(
|
|||
}
|
||||
xfs_trans_ijoin(tp, ip, 0);
|
||||
|
||||
count = bmap->me_len;
|
||||
error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
|
||||
ip, whichfork, bmap->me_startoff,
|
||||
bmap->me_startblock, bmap->me_len,
|
||||
state);
|
||||
bmap->me_startblock, &count, state);
|
||||
if (error)
|
||||
goto err_dfops;
|
||||
|
||||
if (count > 0) {
|
||||
ASSERT(type == XFS_BMAP_UNMAP);
|
||||
irec.br_startblock = bmap->me_startblock;
|
||||
irec.br_blockcount = count;
|
||||
irec.br_startoff = bmap->me_startoff;
|
||||
irec.br_state = state;
|
||||
error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
|
||||
if (error)
|
||||
goto err_dfops;
|
||||
}
|
||||
|
||||
/* Finish transaction, free inodes. */
|
||||
error = xfs_defer_finish(&tp, &dfops, NULL);
|
||||
if (error)
|
||||
|
|
|
@ -219,20 +219,24 @@ xfs_bmap_eof(
|
|||
*/
|
||||
|
||||
/*
|
||||
* Count leaf blocks given a range of extent records.
|
||||
* Count leaf blocks given a range of extent records. Delayed allocation
|
||||
* extents are not counted towards the totals.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_bmap_count_leaves(
|
||||
xfs_ifork_t *ifp,
|
||||
xfs_extnum_t idx,
|
||||
int numrecs,
|
||||
int *count)
|
||||
struct xfs_ifork *ifp,
|
||||
xfs_extnum_t *numrecs,
|
||||
xfs_filblks_t *count)
|
||||
{
|
||||
int b;
|
||||
xfs_extnum_t i;
|
||||
xfs_extnum_t nr_exts = xfs_iext_count(ifp);
|
||||
|
||||
for (b = 0; b < numrecs; b++) {
|
||||
xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
|
||||
*count += xfs_bmbt_get_blockcount(frp);
|
||||
for (i = 0; i < nr_exts; i++) {
|
||||
xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, i);
|
||||
if (!isnullstartblock(xfs_bmbt_get_startblock(frp))) {
|
||||
(*numrecs)++;
|
||||
*count += xfs_bmbt_get_blockcount(frp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
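Editorial note, not part of the diff: the rewritten xfs_bmap_count_leaves() above walks every in-core extent and skips delayed-allocation extents, which have no real blocks on disk yet, returning both a record count and a block count. A small sketch of that filtering with a simplified extent record (not the kernel structures):

/* sketch only */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct irec {
	uint64_t	startblock;	/* ~0ULL marks "delayed" in this sketch */
	uint64_t	blockcount;
};

static bool is_delayed(const struct irec *r)
{
	return r->startblock == ~0ULL;	/* stand-in for isnullstartblock() */
}

static void count_leaves(const struct irec *recs, int nr,
			 uint64_t *nextents, uint64_t *count)
{
	for (int i = 0; i < nr; i++) {
		if (is_delayed(&recs[i]))
			continue;	/* delalloc: nothing allocated yet */
		(*nextents)++;
		*count += recs[i].blockcount;
	}
}

int main(void)
{
	struct irec recs[] = { {100, 8}, {~0ULL, 4}, {200, 16} };
	uint64_t nextents = 0, blocks = 0;

	count_leaves(recs, 3, &nextents, &blocks);
	printf("%llu extents, %llu blocks\n",
	       (unsigned long long)nextents, (unsigned long long)blocks);
	return 0;
}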
@ -245,7 +249,7 @@ xfs_bmap_disk_count_leaves(
|
|||
struct xfs_mount *mp,
|
||||
struct xfs_btree_block *block,
|
||||
int numrecs,
|
||||
int *count)
|
||||
xfs_filblks_t *count)
|
||||
{
|
||||
int b;
|
||||
xfs_bmbt_rec_t *frp;
|
||||
|
@ -260,17 +264,18 @@ xfs_bmap_disk_count_leaves(
|
|||
* Recursively walks each level of a btree
|
||||
* to count total fsblocks in use.
|
||||
*/
|
||||
STATIC int /* error */
|
||||
STATIC int
|
||||
xfs_bmap_count_tree(
|
||||
xfs_mount_t *mp, /* file system mount point */
|
||||
xfs_trans_t *tp, /* transaction pointer */
|
||||
xfs_ifork_t *ifp, /* inode fork pointer */
|
||||
xfs_fsblock_t blockno, /* file system block number */
|
||||
int levelin, /* level in btree */
|
||||
int *count) /* Count of blocks */
|
||||
struct xfs_mount *mp,
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_ifork *ifp,
|
||||
xfs_fsblock_t blockno,
|
||||
int levelin,
|
||||
xfs_extnum_t *nextents,
|
||||
xfs_filblks_t *count)
|
||||
{
|
||||
int error;
|
||||
xfs_buf_t *bp, *nbp;
|
||||
struct xfs_buf *bp, *nbp;
|
||||
int level = levelin;
|
||||
__be64 *pp;
|
||||
xfs_fsblock_t bno = blockno;
|
||||
|
@ -303,8 +308,9 @@ xfs_bmap_count_tree(
|
|||
/* Dive to the next level */
|
||||
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
|
||||
bno = be64_to_cpu(*pp);
|
||||
if (unlikely((error =
|
||||
xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
|
||||
error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
|
||||
count);
|
||||
if (error) {
|
||||
xfs_trans_brelse(tp, bp);
|
||||
XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
|
@ -316,6 +322,7 @@ xfs_bmap_count_tree(
|
|||
for (;;) {
|
||||
nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
|
||||
numrecs = be16_to_cpu(block->bb_numrecs);
|
||||
(*nextents) += numrecs;
|
||||
xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
|
||||
xfs_trans_brelse(tp, bp);
|
||||
if (nextbno == NULLFSBLOCK)
|
||||
|
@ -334,46 +341,64 @@ xfs_bmap_count_tree(
|
|||
}
|
||||
|
||||
/*
|
||||
* Count fsblocks of the given fork.
|
||||
* Count fsblocks of the given fork. Delayed allocation extents are
|
||||
* not counted towards the totals.
|
||||
*/
|
||||
static int /* error */
|
||||
int
|
||||
xfs_bmap_count_blocks(
|
||||
xfs_trans_t *tp, /* transaction pointer */
|
||||
xfs_inode_t *ip, /* incore inode */
|
||||
int whichfork, /* data or attr fork */
|
||||
int *count) /* out: count of blocks */
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *ip,
|
||||
int whichfork,
|
||||
xfs_extnum_t *nextents,
|
||||
xfs_filblks_t *count)
|
||||
{
|
||||
struct xfs_btree_block *block; /* current btree block */
|
||||
xfs_fsblock_t bno; /* block # of "block" */
|
||||
xfs_ifork_t *ifp; /* fork structure */
|
||||
int level; /* btree level, for checking */
|
||||
xfs_mount_t *mp; /* file system mount structure */
|
||||
struct xfs_mount *mp; /* file system mount structure */
|
||||
__be64 *pp; /* pointer to block address */
|
||||
struct xfs_btree_block *block; /* current btree block */
|
||||
struct xfs_ifork *ifp; /* fork structure */
|
||||
xfs_fsblock_t bno; /* block # of "block" */
|
||||
int level; /* btree level, for checking */
|
||||
int error;
|
||||
|
||||
bno = NULLFSBLOCK;
|
||||
mp = ip->i_mount;
|
||||
*nextents = 0;
|
||||
*count = 0;
|
||||
ifp = XFS_IFORK_PTR(ip, whichfork);
|
||||
if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
|
||||
xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
|
||||
if (!ifp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
|
||||
*/
|
||||
block = ifp->if_broot;
|
||||
level = be16_to_cpu(block->bb_level);
|
||||
ASSERT(level > 0);
|
||||
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
|
||||
bno = be64_to_cpu(*pp);
|
||||
ASSERT(bno != NULLFSBLOCK);
|
||||
ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
|
||||
ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
|
||||
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
|
||||
case XFS_DINODE_FMT_EXTENTS:
|
||||
xfs_bmap_count_leaves(ifp, nextents, count);
|
||||
return 0;
|
||||
case XFS_DINODE_FMT_BTREE:
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(tp, ip, whichfork);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
|
||||
if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
|
||||
XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
|
||||
mp);
|
||||
return -EFSCORRUPTED;
|
||||
/*
|
||||
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
|
||||
*/
|
||||
block = ifp->if_broot;
|
||||
level = be16_to_cpu(block->bb_level);
|
||||
ASSERT(level > 0);
|
||||
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
|
||||
bno = be64_to_cpu(*pp);
|
||||
ASSERT(bno != NULLFSBLOCK);
|
||||
ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
|
||||
ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
|
||||
|
||||
error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
|
||||
nextents, count);
|
||||
if (error) {
|
||||
XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
|
||||
XFS_ERRLEVEL_LOW, mp);
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -389,11 +414,11 @@ xfs_getbmapx_fix_eof_hole(
|
|||
struct getbmapx *out, /* output structure */
|
||||
int prealloced, /* this is a file with
|
||||
* preallocated data space */
|
||||
__int64_t end, /* last block requested */
|
||||
int64_t end, /* last block requested */
|
||||
xfs_fsblock_t startblock,
|
||||
bool moretocome)
|
||||
{
|
||||
__int64_t fixlen;
|
||||
int64_t fixlen;
|
||||
xfs_mount_t *mp; /* file system mount point */
|
||||
xfs_ifork_t *ifp; /* inode fork pointer */
|
||||
xfs_extnum_t lastx; /* last extent pointer */
|
||||
|
@ -455,8 +480,8 @@ xfs_getbmap_adjust_shared(
|
|||
|
||||
agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
|
||||
agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
|
||||
error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
|
||||
&ebno, &elen, true);
|
||||
error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
|
||||
map->br_blockcount, &ebno, &elen, true);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -514,9 +539,9 @@ xfs_getbmap(
|
|||
xfs_bmap_format_t formatter, /* format to user */
|
||||
void *arg) /* formatter arg */
|
||||
{
|
||||
__int64_t bmvend; /* last block requested */
|
||||
int64_t bmvend; /* last block requested */
|
||||
int error = 0; /* return value */
|
||||
__int64_t fixlen; /* length for -1 case */
|
||||
int64_t fixlen; /* length for -1 case */
|
||||
int i; /* extent number */
|
||||
int lock; /* lock state */
|
||||
xfs_bmbt_irec_t *map; /* buffer for user's data */
|
||||
|
@ -605,7 +630,7 @@ xfs_getbmap(
|
|||
if (bmv->bmv_length == -1) {
|
||||
fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
|
||||
bmv->bmv_length =
|
||||
max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
|
||||
max_t(int64_t, fixlen - bmv->bmv_offset, 0);
|
||||
} else if (bmv->bmv_length == 0) {
|
||||
bmv->bmv_entries = 0;
|
||||
return 0;
|
||||
|
@ -742,7 +767,7 @@ xfs_getbmap(
|
|||
out[cur_ext].bmv_offset +
|
||||
out[cur_ext].bmv_length;
|
||||
bmv->bmv_length =
|
||||
max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
|
||||
max_t(int64_t, 0, bmvend - bmv->bmv_offset);
|
||||
|
||||
/*
|
||||
* In case we don't want to return the hole,
|
||||
|
@ -1617,7 +1642,7 @@ xfs_swap_extents_check_format(
|
|||
* extent format...
|
||||
*/
|
||||
if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
|
||||
if (XFS_IFORK_BOFF(ip) &&
|
||||
if (XFS_IFORK_Q(ip) &&
|
||||
XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
|
||||
return -EINVAL;
|
||||
if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
|
||||
|
@ -1627,7 +1652,7 @@ xfs_swap_extents_check_format(
|
|||
|
||||
/* Reciprocal target->temp btree format checks */
|
||||
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
|
||||
if (XFS_IFORK_BOFF(tip) &&
|
||||
if (XFS_IFORK_Q(tip) &&
|
||||
XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
|
||||
return -EINVAL;
|
||||
if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
|
||||
|
@ -1676,7 +1701,7 @@ xfs_swap_extent_rmap(
|
|||
xfs_filblks_t ilen;
|
||||
xfs_filblks_t rlen;
|
||||
int nimaps;
|
||||
__uint64_t tip_flags2;
|
||||
uint64_t tip_flags2;
|
||||
|
||||
/*
|
||||
* If the source file has shared blocks, we must flag the donor
|
||||
|
@ -1789,10 +1814,11 @@ xfs_swap_extent_forks(
|
|||
int *target_log_flags)
|
||||
{
|
||||
struct xfs_ifork tempifp, *ifp, *tifp;
|
||||
int aforkblks = 0;
|
||||
int taforkblks = 0;
|
||||
xfs_filblks_t aforkblks = 0;
|
||||
xfs_filblks_t taforkblks = 0;
|
||||
xfs_extnum_t junk;
|
||||
xfs_extnum_t nextents;
|
||||
__uint64_t tmp;
|
||||
uint64_t tmp;
|
||||
int error;
|
||||
|
||||
/*
|
||||
|
@ -1800,14 +1826,14 @@ xfs_swap_extent_forks(
|
|||
*/
|
||||
if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
|
||||
(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
|
||||
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
|
||||
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
|
||||
&aforkblks);
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
|
||||
(tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
|
||||
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
|
||||
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
|
||||
&taforkblks);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -1850,15 +1876,15 @@ xfs_swap_extent_forks(
|
|||
/*
|
||||
* Fix the on-disk inode values
|
||||
*/
|
||||
tmp = (__uint64_t)ip->i_d.di_nblocks;
|
||||
tmp = (uint64_t)ip->i_d.di_nblocks;
|
||||
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
|
||||
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
|
||||
|
||||
tmp = (__uint64_t) ip->i_d.di_nextents;
|
||||
tmp = (uint64_t) ip->i_d.di_nextents;
|
||||
ip->i_d.di_nextents = tip->i_d.di_nextents;
|
||||
tip->i_d.di_nextents = tmp;
|
||||
|
||||
tmp = (__uint64_t) ip->i_d.di_format;
|
||||
tmp = (uint64_t) ip->i_d.di_format;
|
||||
ip->i_d.di_format = tip->i_d.di_format;
|
||||
tip->i_d.di_format = tmp;
|
||||
|
||||
|
@ -1927,7 +1953,7 @@ xfs_swap_extents(
|
|||
int error = 0;
|
||||
int lock_flags;
|
||||
struct xfs_ifork *cowfp;
|
||||
__uint64_t f;
|
||||
uint64_t f;
|
||||
int resblks;
|
||||
|
||||
/*
|
||||
|
|
|
@ -70,4 +70,8 @@ int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
|
|||
|
||||
xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
|
||||
|
||||
int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
|
||||
int whichfork, xfs_extnum_t *nextents,
|
||||
xfs_filblks_t *count);
|
||||
|
||||
#endif /* __XFS_BMAP_UTIL_H__ */
|
||||
|
|
|
@ -1194,7 +1194,7 @@ xfs_buf_ioerror_alert(
|
|||
{
|
||||
xfs_alert(bp->b_target->bt_mount,
|
||||
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
|
||||
(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
|
||||
(uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
|
||||
}
|
||||
|
||||
int
|
||||
|
@@ -2050,6 +2050,66 @@ xfs_buf_delwri_submit(
 	return error;
 }
 
+/*
+ * Push a single buffer on a delwri queue.
+ *
+ * The purpose of this function is to submit a single buffer of a delwri queue
+ * and return with the buffer still on the original queue. The waiting delwri
+ * buffer submission infrastructure guarantees transfer of the delwri queue
+ * buffer reference to a temporary wait list. We reuse this infrastructure to
+ * transfer the buffer back to the original queue.
+ *
+ * Note the buffer transitions from the queued state, to the submitted and wait
+ * listed state and back to the queued state during this call. The buffer
+ * locking and queue management logic between _delwri_pushbuf() and
+ * _delwri_queue() guarantee that the buffer cannot be queued to another list
+ * before returning.
+ */
+int
+xfs_buf_delwri_pushbuf(
+	struct xfs_buf		*bp,
+	struct list_head	*buffer_list)
+{
+	LIST_HEAD		(submit_list);
+	int			error;
+
+	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+
+	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
+
+	/*
+	 * Isolate the buffer to a new local list so we can submit it for I/O
+	 * independently from the rest of the original list.
+	 */
+	xfs_buf_lock(bp);
+	list_move(&bp->b_list, &submit_list);
+	xfs_buf_unlock(bp);
+
+	/*
+	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
+	 * the buffer on the wait list with an associated reference. Rather than
+	 * bounce the buffer from a local wait list back to the original list
+	 * after I/O completion, reuse the original list as the wait list.
+	 */
+	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
+
+	/*
+	 * The buffer is now under I/O and wait listed as during typical delwri
+	 * submission. Lock the buffer to wait for I/O completion. Rather than
+	 * remove the buffer from the wait list and release the reference, we
+	 * want to return with the buffer queued to the original list. The
+	 * buffer already sits on the original list with a wait list reference,
+	 * however. If we let the queue inherit that wait list reference, all we
+	 * need to do is reset the DELWRI_Q flag.
+	 */
+	xfs_buf_lock(bp);
+	error = bp->b_error;
+	bp->b_flags |= _XBF_DELWRI_Q;
+	xfs_buf_unlock(bp);
+
+	return error;
+}
+
|
||||
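Editorial note, not part of the diff: the new helper above exists so a caller walking a delwri queue (quotacheck's buffer list, in this series) can force one queued buffer to disk without losing its place on that queue. A deliberately simplified, runnable userspace analogue of the calling pattern; the real helper keeps the buffer's wait-list reference rather than deleting and re-adding the node as this sketch does:

/* sketch only */
#include <stdio.h>

struct node {
	struct node	*prev, *next;
	const char	*name;
};

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* analogue of xfs_buf_delwri_pushbuf(): flush one node, keep it queued */
static void pushbuf(struct node *n, struct node *queue)
{
	list_del(n);				/* isolate, like the private submit_list */
	printf("flushing %s\n", n->name);	/* stand-in for the actual I/O */
	list_add_tail(n, queue);		/* back on the original delwri queue */
}

int main(void)
{
	struct node head, a = { .name = "bufA" }, b = { .name = "bufB" };

	list_init(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);
	pushbuf(&a, &head);	/* bufA written out but still on the queue */
	return 0;
}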
int __init
|
||||
xfs_buf_init(void)
|
||||
{
|
||||
|
|
|
@ -332,6 +332,7 @@ extern void xfs_buf_delwri_cancel(struct list_head *);
|
|||
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
|
||||
extern int xfs_buf_delwri_submit(struct list_head *);
|
||||
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
|
||||
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
|
||||
|
||||
/* Buffer Daemon Setup Routines */
|
||||
extern int xfs_buf_init(void);
|
||||
|
|
|
@@ -636,20 +636,23 @@ xfs_buf_item_unlock(
 
 	/*
 	 * Clean buffers, by definition, cannot be in the AIL. However, aborted
-	 * buffers may be dirty and hence in the AIL. Therefore if we are
-	 * aborting a buffer and we've just taken the last refernce away, we
-	 * have to check if it is in the AIL before freeing it. We need to free
-	 * it in this case, because an aborted transaction has already shut the
-	 * filesystem down and this is the last chance we will have to do so.
+	 * buffers may be in the AIL regardless of dirty state. An aborted
+	 * transaction that invalidates a buffer already in the AIL may have
+	 * marked it stale and cleared the dirty state, for example.
+	 *
+	 * Therefore if we are aborting a buffer and we've just taken the last
+	 * reference away, we have to check if it is in the AIL before freeing
+	 * it. We need to free it in this case, because an aborted transaction
+	 * has already shut the filesystem down and this is the last chance we
+	 * will have to do so.
 	 */
 	if (atomic_dec_and_test(&bip->bli_refcount)) {
-		if (clean)
-			xfs_buf_item_relse(bp);
-		else if (aborted) {
+		if (aborted) {
 			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
 			xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
 			xfs_buf_item_relse(bp);
-		}
+		} else if (clean)
+			xfs_buf_item_relse(bp);
 	}
 
 	if (!(flags & XFS_BLI_HOLD))
|
|
|
@ -44,7 +44,7 @@ static unsigned char xfs_dir3_filetype_table[] = {
|
|||
static unsigned char
|
||||
xfs_dir3_get_dtype(
|
||||
struct xfs_mount *mp,
|
||||
__uint8_t filetype)
|
||||
uint8_t filetype)
|
||||
{
|
||||
if (!xfs_sb_version_hasftype(&mp->m_sb))
|
||||
return DT_UNKNOWN;
|
||||
|
@ -117,7 +117,7 @@ xfs_dir2_sf_getdents(
|
|||
*/
|
||||
sfep = xfs_dir2_sf_firstentry(sfp);
|
||||
for (i = 0; i < sfp->count; i++) {
|
||||
__uint8_t filetype;
|
||||
uint8_t filetype;
|
||||
|
||||
off = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
|
||||
xfs_dir2_sf_get_offset(sfep));
|
||||
|
@ -170,7 +170,7 @@ xfs_dir2_block_getdents(
|
|||
return 0;
|
||||
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
error = xfs_dir3_block_read(NULL, dp, &bp);
|
||||
error = xfs_dir3_block_read(args->trans, dp, &bp);
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -194,7 +194,7 @@ xfs_dir2_block_getdents(
|
|||
* Each object is a real entry (dep) or an unused one (dup).
|
||||
*/
|
||||
while (ptr < endptr) {
|
||||
__uint8_t filetype;
|
||||
uint8_t filetype;
|
||||
|
||||
dup = (xfs_dir2_data_unused_t *)ptr;
|
||||
/*
|
||||
|
@ -228,7 +228,7 @@ xfs_dir2_block_getdents(
|
|||
if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
|
||||
be64_to_cpu(dep->inumber),
|
||||
xfs_dir3_get_dtype(dp->i_mount, filetype))) {
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -239,218 +239,104 @@ xfs_dir2_block_getdents(
|
|||
*/
|
||||
ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
|
||||
0x7fffffff;
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct xfs_dir2_leaf_map_info {
|
||||
xfs_extlen_t map_blocks; /* number of fsbs in map */
|
||||
xfs_dablk_t map_off; /* last mapped file offset */
|
||||
int map_size; /* total entries in *map */
|
||||
int map_valid; /* valid entries in *map */
|
||||
int nmap; /* mappings to ask xfs_bmapi */
|
||||
xfs_dir2_db_t curdb; /* db for current block */
|
||||
int ra_current; /* number of read-ahead blks */
|
||||
int ra_index; /* *map index for read-ahead */
|
||||
int ra_offset; /* map entry offset for ra */
|
||||
int ra_want; /* readahead count wanted */
|
||||
struct xfs_bmbt_irec map[]; /* map vector for blocks */
|
||||
};
|
||||
|
||||
/*
|
||||
* Read a directory block and initiate readahead for blocks beyond that.
|
||||
* We maintain a sliding readahead window of the remaining space in the
|
||||
* buffer rounded up to the nearest block.
|
||||
*/
|
||||
STATIC int
|
||||
xfs_dir2_leaf_readbuf(
|
||||
struct xfs_da_args *args,
|
||||
size_t bufsize,
|
||||
struct xfs_dir2_leaf_map_info *mip,
|
||||
xfs_dir2_off_t *curoff,
|
||||
struct xfs_buf **bpp,
|
||||
bool trim_map)
|
||||
xfs_dir2_off_t *cur_off,
|
||||
xfs_dablk_t *ra_blk,
|
||||
struct xfs_buf **bpp)
|
||||
{
|
||||
struct xfs_inode *dp = args->dp;
|
||||
struct xfs_buf *bp = NULL;
|
||||
struct xfs_bmbt_irec *map = mip->map;
|
||||
struct blk_plug plug;
|
||||
int error = 0;
|
||||
int length;
|
||||
int i;
|
||||
int j;
|
||||
struct xfs_da_geometry *geo = args->geo;
|
||||
struct xfs_ifork *ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
|
||||
struct xfs_bmbt_irec map;
|
||||
struct blk_plug plug;
|
||||
xfs_dir2_off_t new_off;
|
||||
xfs_dablk_t next_ra;
|
||||
xfs_dablk_t map_off;
|
||||
xfs_dablk_t last_da;
|
||||
xfs_extnum_t idx;
|
||||
int ra_want;
|
||||
int error = 0;
|
||||
|
||||
/*
|
||||
* If the caller just finished processing a buffer, it will tell us
|
||||
* we need to trim that block out of the mapping now it is done.
|
||||
*/
|
||||
if (trim_map) {
|
||||
mip->map_blocks -= geo->fsbcount;
|
||||
/*
|
||||
* Loop to get rid of the extents for the
|
||||
* directory block.
|
||||
*/
|
||||
for (i = geo->fsbcount; i > 0; ) {
|
||||
j = min_t(int, map->br_blockcount, i);
|
||||
map->br_blockcount -= j;
|
||||
map->br_startblock += j;
|
||||
map->br_startoff += j;
|
||||
/*
|
||||
* If mapping is done, pitch it from
|
||||
* the table.
|
||||
*/
|
||||
if (!map->br_blockcount && --mip->map_valid)
|
||||
memmove(&map[0], &map[1],
|
||||
sizeof(map[0]) * mip->map_valid);
|
||||
i -= j;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Recalculate the readahead blocks wanted.
|
||||
*/
|
||||
mip->ra_want = howmany(bufsize + geo->blksize, (1 << geo->fsblog)) - 1;
|
||||
ASSERT(mip->ra_want >= 0);
|
||||
|
||||
/*
|
||||
* If we don't have as many as we want, and we haven't
|
||||
* run out of data blocks, get some more mappings.
|
||||
*/
|
||||
if (1 + mip->ra_want > mip->map_blocks &&
|
||||
mip->map_off < xfs_dir2_byte_to_da(geo, XFS_DIR2_LEAF_OFFSET)) {
|
||||
/*
|
||||
* Get more bmaps, fill in after the ones
|
||||
* we already have in the table.
|
||||
*/
|
||||
mip->nmap = mip->map_size - mip->map_valid;
|
||||
error = xfs_bmapi_read(dp, mip->map_off,
|
||||
xfs_dir2_byte_to_da(geo, XFS_DIR2_LEAF_OFFSET) -
|
||||
mip->map_off,
|
||||
&map[mip->map_valid], &mip->nmap, 0);
|
||||
|
||||
/*
|
||||
* Don't know if we should ignore this or try to return an
|
||||
* error. The trouble with returning errors is that readdir
|
||||
* will just stop without actually passing the error through.
|
||||
*/
|
||||
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
|
||||
error = xfs_iread_extents(args->trans, dp, XFS_DATA_FORK);
|
||||
if (error)
|
||||
goto out; /* XXX */
|
||||
|
||||
/*
|
||||
* If we got all the mappings we asked for, set the final map
|
||||
* offset based on the last bmap value received. Otherwise,
|
||||
* we've reached the end.
|
||||
*/
|
||||
if (mip->nmap == mip->map_size - mip->map_valid) {
|
||||
i = mip->map_valid + mip->nmap - 1;
|
||||
mip->map_off = map[i].br_startoff + map[i].br_blockcount;
|
||||
} else
|
||||
mip->map_off = xfs_dir2_byte_to_da(geo,
|
||||
XFS_DIR2_LEAF_OFFSET);
|
||||
|
||||
/*
|
||||
* Look for holes in the mapping, and eliminate them. Count up
|
||||
* the valid blocks.
|
||||
*/
|
||||
for (i = mip->map_valid; i < mip->map_valid + mip->nmap; ) {
|
||||
if (map[i].br_startblock == HOLESTARTBLOCK) {
|
||||
mip->nmap--;
|
||||
length = mip->map_valid + mip->nmap - i;
|
||||
if (length)
|
||||
memmove(&map[i], &map[i + 1],
|
||||
sizeof(map[i]) * length);
|
||||
} else {
|
||||
mip->map_blocks += map[i].br_blockcount;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
mip->map_valid += mip->nmap;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* No valid mappings, so no more data blocks.
|
||||
* Look for mapped directory blocks at or above the current offset.
|
||||
* Truncate down to the nearest directory block to start the scanning
|
||||
* operation.
|
||||
*/
|
||||
if (!mip->map_valid) {
|
||||
*curoff = xfs_dir2_da_to_byte(geo, mip->map_off);
|
||||
last_da = xfs_dir2_byte_to_da(geo, XFS_DIR2_LEAF_OFFSET);
|
||||
map_off = xfs_dir2_db_to_da(geo, xfs_dir2_byte_to_db(geo, *cur_off));
|
||||
if (!xfs_iext_lookup_extent(dp, ifp, map_off, &idx, &map))
|
||||
goto out;
|
||||
}
|
||||
if (map.br_startoff >= last_da)
|
||||
goto out;
|
||||
xfs_trim_extent(&map, map_off, last_da - map_off);
|
||||
|
||||
/*
|
||||
* Read the directory block starting at the first mapping.
|
||||
*/
|
||||
mip->curdb = xfs_dir2_da_to_db(geo, map->br_startoff);
|
||||
error = xfs_dir3_data_read(NULL, dp, map->br_startoff,
|
||||
map->br_blockcount >= geo->fsbcount ?
|
||||
XFS_FSB_TO_DADDR(dp->i_mount, map->br_startblock) :
|
||||
-1, &bp);
|
||||
/*
|
||||
* Should just skip over the data block instead of giving up.
|
||||
*/
|
||||
/* Read the directory block of that first mapping. */
|
||||
new_off = xfs_dir2_da_to_byte(geo, map.br_startoff);
|
||||
if (new_off > *cur_off)
|
||||
*cur_off = new_off;
|
||||
error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, -1, &bp);
|
||||
if (error)
|
||||
goto out; /* XXX */
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Adjust the current amount of read-ahead: we just read a block that
|
||||
* was previously ra.
|
||||
* Start readahead for the next bufsize's worth of dir data blocks.
|
||||
* We may have already issued readahead for some of that range;
|
||||
* ra_blk tracks the last block we tried to read(ahead).
|
||||
*/
|
||||
if (mip->ra_current)
|
||||
mip->ra_current -= geo->fsbcount;
|
||||
ra_want = howmany(bufsize + geo->blksize, (1 << geo->fsblog));
|
||||
if (*ra_blk >= last_da)
|
||||
goto out;
|
||||
else if (*ra_blk == 0)
|
||||
*ra_blk = map.br_startoff;
|
||||
next_ra = map.br_startoff + geo->fsbcount;
|
||||
if (next_ra >= last_da)
|
||||
goto out_no_ra;
|
||||
if (map.br_blockcount < geo->fsbcount &&
|
||||
!xfs_iext_get_extent(ifp, ++idx, &map))
|
||||
goto out_no_ra;
|
||||
if (map.br_startoff >= last_da)
|
||||
goto out_no_ra;
|
||||
xfs_trim_extent(&map, next_ra, last_da - next_ra);
|
||||
|
||||
/*
|
||||
* Do we need more readahead?
|
||||
* Each loop tries to process 1 full dir blk; last may be partial.
|
||||
*/
|
||||
/* Start ra for each dir (not fs) block that has a mapping. */
|
||||
blk_start_plug(&plug);
|
||||
for (mip->ra_index = mip->ra_offset = i = 0;
|
||||
mip->ra_want > mip->ra_current && i < mip->map_blocks;
|
||||
i += geo->fsbcount) {
|
||||
ASSERT(mip->ra_index < mip->map_valid);
|
||||
/*
|
||||
* Read-ahead a contiguous directory block.
|
||||
*/
|
||||
if (i > mip->ra_current &&
|
||||
(map[mip->ra_index].br_blockcount - mip->ra_offset) >=
|
||||
geo->fsbcount) {
|
||||
xfs_dir3_data_readahead(dp,
|
||||
map[mip->ra_index].br_startoff + mip->ra_offset,
|
||||
XFS_FSB_TO_DADDR(dp->i_mount,
|
||||
map[mip->ra_index].br_startblock +
|
||||
mip->ra_offset));
|
||||
mip->ra_current = i;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read-ahead a non-contiguous directory block. This doesn't
|
||||
* use our mapping, but this is a very rare case.
|
||||
*/
|
||||
else if (i > mip->ra_current) {
|
||||
xfs_dir3_data_readahead(dp,
|
||||
map[mip->ra_index].br_startoff +
|
||||
mip->ra_offset, -1);
|
||||
mip->ra_current = i;
|
||||
}
|
||||
|
||||
/*
|
||||
* Advance offset through the mapping table, processing a full
|
||||
* dir block even if it is fragmented into several extents.
|
||||
* But stop if we have consumed all valid mappings, even if
|
||||
* it's not yet a full directory block.
|
||||
*/
|
||||
for (j = 0;
|
||||
j < geo->fsbcount && mip->ra_index < mip->map_valid;
|
||||
j += length ) {
|
||||
/*
|
||||
* The rest of this extent but not more than a dir
|
||||
* block.
|
||||
*/
|
||||
length = min_t(int, geo->fsbcount - j,
|
||||
map[mip->ra_index].br_blockcount -
|
||||
mip->ra_offset);
|
||||
mip->ra_offset += length;
|
||||
|
||||
/*
|
||||
* Advance to the next mapping if this one is used up.
|
||||
*/
|
||||
if (mip->ra_offset == map[mip->ra_index].br_blockcount) {
|
||||
mip->ra_offset = 0;
|
||||
mip->ra_index++;
|
||||
while (ra_want > 0) {
|
||||
next_ra = roundup((xfs_dablk_t)map.br_startoff, geo->fsbcount);
|
||||
while (ra_want > 0 &&
|
||||
next_ra < map.br_startoff + map.br_blockcount) {
|
||||
if (next_ra >= last_da) {
|
||||
*ra_blk = last_da;
|
||||
break;
|
||||
}
|
||||
if (next_ra > *ra_blk) {
|
||||
xfs_dir3_data_readahead(dp, next_ra, -2);
|
||||
*ra_blk = next_ra;
|
||||
}
|
||||
ra_want -= geo->fsbcount;
|
||||
next_ra += geo->fsbcount;
|
||||
}
|
||||
if (!xfs_iext_get_extent(ifp, ++idx, &map)) {
|
||||
*ra_blk = last_da;
|
||||
break;
|
||||
}
|
||||
}
|
||||
blk_finish_plug(&plug);
|
||||
|
@ -458,6 +344,9 @@ xfs_dir2_leaf_readbuf(
|
|||
out:
|
||||
*bpp = bp;
|
||||
return error;
|
||||
out_no_ra:
|
||||
*ra_blk = last_da;
|
||||
goto out;
|
||||
}
|
||||
|
||||
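Editorial note, not part of the diff: the rewritten xfs_dir2_leaf_readbuf() above sizes its readahead window from the caller's getdents buffer, ra_want = howmany(bufsize + blksize, fsblocksize), and then issues readahead one directory block (geo->fsbcount filesystem blocks) at a time until the window is consumed or the mappings run out. A sketch of just that arithmetic, ignoring the extent-map walk; block sizes are example values:

/* sketch only */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int main(void)
{
	unsigned int bufsize = 32768;	/* getdents buffer from the caller */
	unsigned int dirblksize = 4096;	/* geo->blksize */
	unsigned int fsbsize = 4096;	/* 1 << geo->fsblog */
	unsigned int fsbcount = dirblksize / fsbsize;	/* fs blocks per dir block */
	int ra_want = howmany(bufsize + dirblksize, fsbsize);

	/* issue readahead one directory block at a time until the window is used */
	for (unsigned int blk = 0; ra_want > 0; blk += fsbcount, ra_want -= fsbcount)
		printf("readahead dir block at dablk %u\n", blk);
	return 0;
}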
/*
|
||||
|
@ -475,14 +364,14 @@ xfs_dir2_leaf_getdents(
|
|||
xfs_dir2_data_hdr_t *hdr; /* data block header */
|
||||
xfs_dir2_data_entry_t *dep; /* data entry */
|
||||
xfs_dir2_data_unused_t *dup; /* unused entry */
|
||||
int error = 0; /* error return value */
|
||||
char *ptr = NULL; /* pointer to current data */
|
||||
struct xfs_da_geometry *geo = args->geo;
|
||||
xfs_dablk_t rablk = 0; /* current readahead block */
|
||||
xfs_dir2_off_t curoff; /* current overall offset */
|
||||
int length; /* temporary length value */
|
||||
int byteoff; /* offset in current block */
|
||||
xfs_dir2_off_t curoff; /* current overall offset */
|
||||
xfs_dir2_off_t newoff; /* new curoff after new blk */
|
||||
char *ptr = NULL; /* pointer to current data */
|
||||
struct xfs_dir2_leaf_map_info *map_info;
|
||||
struct xfs_da_geometry *geo = args->geo;
|
||||
int lock_mode;
|
||||
int error = 0; /* error return value */
|
||||
|
||||
/*
|
||||
* If the offset is at or past the largest allowed value,
|
||||
|
@ -491,74 +380,36 @@ xfs_dir2_leaf_getdents(
|
|||
if (ctx->pos >= XFS_DIR2_MAX_DATAPTR)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Set up to bmap a number of blocks based on the caller's
|
||||
* buffer size, the directory block size, and the filesystem
|
||||
* block size.
|
||||
*/
|
||||
length = howmany(bufsize + geo->blksize, (1 << geo->fsblog));
|
||||
map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
|
||||
(length * sizeof(struct xfs_bmbt_irec)),
|
||||
KM_SLEEP | KM_NOFS);
|
||||
map_info->map_size = length;
|
||||
|
||||
/*
|
||||
* Inside the loop we keep the main offset value as a byte offset
|
||||
* in the directory file.
|
||||
*/
|
||||
curoff = xfs_dir2_dataptr_to_byte(ctx->pos);
|
||||
|
||||
/*
|
||||
* Force this conversion through db so we truncate the offset
|
||||
* down to get the start of the data block.
|
||||
*/
|
||||
map_info->map_off = xfs_dir2_db_to_da(geo,
|
||||
xfs_dir2_byte_to_db(geo, curoff));
|
||||
|
||||
/*
|
||||
* Loop over directory entries until we reach the end offset.
|
||||
* Get more blocks and readahead as necessary.
|
||||
*/
|
||||
while (curoff < XFS_DIR2_LEAF_OFFSET) {
|
||||
__uint8_t filetype;
|
||||
uint8_t filetype;
|
||||
|
||||
/*
|
||||
* If we have no buffer, or we're off the end of the
|
||||
* current buffer, need to get another one.
|
||||
*/
|
||||
if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
|
||||
int lock_mode;
|
||||
bool trim_map = false;
|
||||
|
||||
if (bp) {
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
bp = NULL;
|
||||
trim_map = true;
|
||||
}
|
||||
|
||||
lock_mode = xfs_ilock_data_map_shared(dp);
|
||||
error = xfs_dir2_leaf_readbuf(args, bufsize, map_info,
|
||||
&curoff, &bp, trim_map);
|
||||
error = xfs_dir2_leaf_readbuf(args, bufsize, &curoff,
|
||||
&rablk, &bp);
|
||||
xfs_iunlock(dp, lock_mode);
|
||||
if (error || !map_info->map_valid)
|
||||
if (error || !bp)
|
||||
break;
|
||||
|
||||
/*
|
||||
* Having done a read, we need to set a new offset.
|
||||
*/
|
||||
newoff = xfs_dir2_db_off_to_byte(geo,
|
||||
map_info->curdb, 0);
|
||||
/*
|
||||
* Start of the current block.
|
||||
*/
|
||||
if (curoff < newoff)
|
||||
curoff = newoff;
|
||||
/*
|
||||
* Make sure we're in the right block.
|
||||
*/
|
||||
else if (curoff > newoff)
|
||||
ASSERT(xfs_dir2_byte_to_db(geo, curoff) ==
|
||||
map_info->curdb);
|
||||
hdr = bp->b_addr;
|
||||
xfs_dir3_data_check(dp, bp);
|
||||
/*
|
||||
|
@ -643,17 +494,22 @@ xfs_dir2_leaf_getdents(
|
|||
ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
|
||||
else
|
||||
ctx->pos = xfs_dir2_byte_to_dataptr(curoff) & 0x7fffffff;
|
||||
kmem_free(map_info);
|
||||
if (bp)
|
||||
xfs_trans_brelse(NULL, bp);
|
||||
xfs_trans_brelse(args->trans, bp);
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read a directory.
|
||||
*
|
||||
* If supplied, the transaction collects locked dir buffers to avoid
|
||||
* nested buffer deadlocks. This function does not dirty the
|
||||
* transaction. The caller should ensure that the inode is locked
|
||||
* before calling this function.
|
||||
*/
|
||||
int
|
||||
xfs_readdir(
|
||||
struct xfs_trans *tp,
|
||||
struct xfs_inode *dp,
|
||||
struct dir_context *ctx,
|
||||
size_t bufsize)
|
||||
|
@ -672,6 +528,7 @@ xfs_readdir(
|
|||
|
||||
args.dp = dp;
|
||||
args.geo = dp->i_mount->m_dir_geo;
|
||||
args.trans = tp;
|
||||
|
||||
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
|
||||
rval = xfs_dir2_sf_getdents(&args, ctx);
|
||||
|
|
|
@ -39,7 +39,7 @@ xfs_trim_extents(
|
|||
xfs_daddr_t start,
|
||||
xfs_daddr_t end,
|
||||
xfs_daddr_t minlen,
|
||||
__uint64_t *blocks_trimmed)
|
||||
uint64_t *blocks_trimmed)
|
||||
{
|
||||
struct block_device *bdev = mp->m_ddev_targp->bt_bdev;
|
||||
struct xfs_btree_cur *cur;
|
||||
|
@ -166,7 +166,7 @@ xfs_ioc_trim(
|
|||
struct fstrim_range range;
|
||||
xfs_daddr_t start, end, minlen;
|
||||
xfs_agnumber_t start_agno, end_agno, agno;
|
||||
__uint64_t blocks_trimmed = 0;
|
||||
uint64_t blocks_trimmed = 0;
|
||||
int error, last_error = 0;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
|
|
|
@ -276,7 +276,7 @@ xfs_qm_init_dquot_blk(
|
|||
void
|
||||
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
|
||||
{
|
||||
__uint64_t space;
|
||||
uint64_t space;
|
||||
|
||||
dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
|
||||
dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
|
||||
|
@ -472,18 +472,23 @@ xfs_qm_dqtobp(
|
|||
struct xfs_mount *mp = dqp->q_mount;
|
||||
xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id);
|
||||
struct xfs_trans *tp = (tpp ? *tpp : NULL);
|
||||
uint lock_mode;
|
||||
uint lock_mode = 0;
|
||||
|
||||
quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
|
||||
dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
|
||||
|
||||
lock_mode = xfs_ilock_data_map_shared(quotip);
|
||||
ASSERT(!(flags & XFS_QMOPT_NOLOCK) ||
|
||||
xfs_isilocked(quotip, XFS_ILOCK_SHARED) ||
|
||||
xfs_isilocked(quotip, XFS_ILOCK_EXCL));
|
||||
if (!(flags & XFS_QMOPT_NOLOCK))
|
||||
lock_mode = xfs_ilock_data_map_shared(quotip);
|
||||
if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
|
||||
/*
|
||||
* Return if this type of quotas is turned off while we
|
||||
* didn't have the quota inode lock.
|
||||
*/
|
||||
xfs_iunlock(quotip, lock_mode);
|
||||
if (lock_mode)
|
||||
xfs_iunlock(quotip, lock_mode);
|
||||
return -ESRCH;
|
||||
}
|
||||
|
||||
|
@ -493,7 +498,8 @@ xfs_qm_dqtobp(
|
|||
error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
|
||||
XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
|
||||
|
||||
xfs_iunlock(quotip, lock_mode);
|
||||
if (lock_mode)
|
||||
xfs_iunlock(quotip, lock_mode);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -695,21 +701,18 @@ xfs_qm_dqread(
|
|||
*/
|
||||
static int
|
||||
xfs_dq_get_next_id(
|
||||
xfs_mount_t *mp,
|
||||
struct xfs_mount *mp,
|
||||
uint type,
|
||||
xfs_dqid_t *id,
|
||||
loff_t eof)
|
||||
xfs_dqid_t *id)
|
||||
{
|
||||
struct xfs_inode *quotip;
|
||||
struct xfs_inode *quotip = xfs_quota_inode(mp, type);
|
||||
xfs_dqid_t next_id = *id + 1; /* simple advance */
|
||||
uint lock_flags;
|
||||
struct xfs_bmbt_irec got;
|
||||
xfs_extnum_t idx;
|
||||
xfs_fsblock_t start;
|
||||
loff_t offset;
|
||||
uint lock;
|
||||
xfs_dqid_t next_id;
|
||||
int error = 0;
|
||||
|
||||
/* Simple advance */
|
||||
next_id = *id + 1;
|
||||
|
||||
/* If we'd wrap past the max ID, stop */
|
||||
if (next_id < *id)
|
||||
return -ENOENT;
|
||||
|
@@ -723,23 +726,25 @@ xfs_dq_get_next_id(
 	/* Nope, next_id is now past the current chunk, so find the next one */
 	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
 
-	quotip = xfs_quota_inode(mp, type);
-	lock = xfs_ilock_data_map_shared(quotip);
+	lock_flags = xfs_ilock_data_map_shared(quotip);
+	if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
+		error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
+		if (error)
+			return error;
+	}
 
-	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
-			eof, SEEK_DATA);
-	if (offset < 0)
-		error = offset;
+	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &idx, &got)) {
+		/* contiguous chunk, bump startoff for the id calculation */
+		if (got.br_startoff < start)
+			got.br_startoff = start;
+		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
+	} else {
+		error = -ENOENT;
+	}
 
-	xfs_iunlock(quotip, lock);
+	xfs_iunlock(quotip, lock_flags);
 
-	/* -ENXIO is essentially "no more data" */
-	if (error)
-		return (error == -ENXIO ? -ENOENT: error);
-
-	/* Convert next data offset back to a quota id */
-	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
-	return 0;
+	return error;
 }
 
 /*
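The rewritten xfs_dq_get_next_id() above relies on unsigned wraparound to notice that the 32-bit quota ID space has been exhausted. A standalone sketch of that stop condition; the dqid_t typedef and advance_id() helper are illustrative stand-ins, not code from the patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for xfs_dqid_t, a 32-bit unsigned quota ID. */
typedef uint32_t dqid_t;

/* Advance *id by one, or report that incrementing would wrap past the max ID. */
static int advance_id(dqid_t *id)
{
	dqid_t next = *id + 1;	/* simple advance */

	if (next < *id)		/* unsigned wrap: 0xffffffff + 1 == 0 */
		return -1;	/* corresponds to the -ENOENT return above */
	*id = next;
	return 0;
}

int main(void)
{
	dqid_t id = 0xfffffffeu;

	while (advance_id(&id) == 0)
		printf("next quota id: %u\n", id);	/* prints 4294967295 once, then stops */
	return 0;
}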
@@ -762,7 +767,6 @@ xfs_qm_dqget(
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
 	struct xfs_dquot	*dqp;
-	loff_t			eof = 0;
 	int			error;
 
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -790,21 +794,6 @@ xfs_qm_dqget(
 	}
 #endif
 
-	/* Get the end of the quota file if we need it */
-	if (flags & XFS_QMOPT_DQNEXT) {
-		struct xfs_inode	*quotip;
-		xfs_fileoff_t		last;
-		uint			lock_mode;
-
-		quotip = xfs_quota_inode(mp, type);
-		lock_mode = xfs_ilock_data_map_shared(quotip);
-		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
-		xfs_iunlock(quotip, lock_mode);
-		if (error)
-			return error;
-		eof = XFS_FSB_TO_B(mp, last);
-	}
-
 restart:
 	mutex_lock(&qi->qi_tree_lock);
 	dqp = radix_tree_lookup(tree, id);
@@ -823,7 +812,7 @@ xfs_qm_dqget(
 		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 			xfs_dqunlock(dqp);
 			mutex_unlock(&qi->qi_tree_lock);
-			error = xfs_dq_get_next_id(mp, type, &id, eof);
+			error = xfs_dq_get_next_id(mp, type, &id);
 			if (error)
 				return error;
 			goto restart;
@@ -858,7 +847,7 @@ xfs_qm_dqget(
 
 	/* If we are asked to find next active id, keep looking */
 	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
-		error = xfs_dq_get_next_id(mp, type, &id, eof);
+		error = xfs_dq_get_next_id(mp, type, &id);
 		if (!error)
 			goto restart;
 	}
@@ -917,7 +906,7 @@ xfs_qm_dqget(
 	if (flags & XFS_QMOPT_DQNEXT) {
 		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 			xfs_qm_dqput(dqp);
-			error = xfs_dq_get_next_id(mp, type, &id, eof);
+			error = xfs_dq_get_next_id(mp, type, &id);
 			if (error)
 				return error;
 			goto restart;
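The XFS_QMOPT_DQNEXT lookups above back the Q_XGETNEXTQUOTA quotactl command, which walks only the dquots that actually exist on disk. A rough user-space sketch of that walk; the device path is an assumption and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

/* Walk all active user dquots on an XFS filesystem, starting from ID 0. */
int main(void)
{
	const char *dev = "/dev/sdb1";	/* illustrative block device */
	struct fs_disk_quota dq;
	int id = 0;

	for (;;) {
		memset(&dq, 0, sizeof(dq));
		if (quotactl(QCMD(Q_XGETNEXTQUOTA, USRQUOTA), dev, id, (void *)&dq) < 0) {
			if (errno != ENOENT)	/* ENOENT: no more active dquots */
				perror("quotactl");
			break;
		}
		printf("uid %u: %llu basic (512-byte) blocks used\n", dq.d_id,
		       (unsigned long long)dq.d_bcount);
		id = dq.d_id + 1;	/* resume after the ID we just saw */
	}
	return 0;
}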
|
|
@ -22,103 +22,280 @@
|
|||
#include "xfs_trans_resv.h"
|
||||
#include "xfs_mount.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_sysfs.h"
|
||||
|
||||
#ifdef DEBUG
|
||||
|
||||
int xfs_etest[XFS_NUM_INJECT_ERROR];
|
||||
int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
|
||||
char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
|
||||
int xfs_error_test_active;
|
||||
static unsigned int xfs_errortag_random_default[] = {
|
||||
XFS_RANDOM_DEFAULT,
|
||||
XFS_RANDOM_IFLUSH_1,
|
||||
XFS_RANDOM_IFLUSH_2,
|
||||
XFS_RANDOM_IFLUSH_3,
|
||||
XFS_RANDOM_IFLUSH_4,
|
||||
XFS_RANDOM_IFLUSH_5,
|
||||
XFS_RANDOM_IFLUSH_6,
|
||||
XFS_RANDOM_DA_READ_BUF,
|
||||
XFS_RANDOM_BTREE_CHECK_LBLOCK,
|
||||
XFS_RANDOM_BTREE_CHECK_SBLOCK,
|
||||
XFS_RANDOM_ALLOC_READ_AGF,
|
||||
XFS_RANDOM_IALLOC_READ_AGI,
|
||||
XFS_RANDOM_ITOBP_INOTOBP,
|
||||
XFS_RANDOM_IUNLINK,
|
||||
XFS_RANDOM_IUNLINK_REMOVE,
|
||||
XFS_RANDOM_DIR_INO_VALIDATE,
|
||||
XFS_RANDOM_BULKSTAT_READ_CHUNK,
|
||||
XFS_RANDOM_IODONE_IOERR,
|
||||
XFS_RANDOM_STRATREAD_IOERR,
|
||||
XFS_RANDOM_STRATCMPL_IOERR,
|
||||
XFS_RANDOM_DIOWRITE_IOERR,
|
||||
XFS_RANDOM_BMAPIFORMAT,
|
||||
XFS_RANDOM_FREE_EXTENT,
|
||||
XFS_RANDOM_RMAP_FINISH_ONE,
|
||||
XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE,
|
||||
XFS_RANDOM_REFCOUNT_FINISH_ONE,
|
||||
XFS_RANDOM_BMAP_FINISH_ONE,
|
||||
XFS_RANDOM_AG_RESV_CRITICAL,
|
||||
XFS_RANDOM_DROP_WRITES,
|
||||
XFS_RANDOM_LOG_BAD_CRC,
|
||||
};
|
||||
|
||||
int
|
||||
xfs_error_test(int error_tag, int *fsidp, char *expression,
|
||||
int line, char *file, unsigned long randfactor)
|
||||
struct xfs_errortag_attr {
|
||||
struct attribute attr;
|
||||
unsigned int tag;
|
||||
};
|
||||
|
||||
static inline struct xfs_errortag_attr *
|
||||
to_attr(struct attribute *attr)
|
||||
{
|
||||
int i;
|
||||
int64_t fsid;
|
||||
return container_of(attr, struct xfs_errortag_attr, attr);
|
||||
}
|
||||
|
||||
if (prandom_u32() % randfactor)
|
||||
return 0;
|
||||
static inline struct xfs_mount *
|
||||
to_mp(struct kobject *kobject)
|
||||
{
|
||||
struct xfs_kobj *kobj = to_kobj(kobject);
|
||||
|
||||
memcpy(&fsid, fsidp, sizeof(xfs_fsid_t));
|
||||
return container_of(kobj, struct xfs_mount, m_errortag_kobj);
|
||||
}
|
||||
|
||||
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
|
||||
if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) {
|
||||
xfs_warn(NULL,
|
||||
"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
|
||||
expression, file, line, xfs_etest_fsname[i]);
|
||||
return 1;
|
||||
}
|
||||
STATIC ssize_t
|
||||
xfs_errortag_attr_store(
|
||||
struct kobject *kobject,
|
||||
struct attribute *attr,
|
||||
const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
struct xfs_mount *mp = to_mp(kobject);
|
||||
struct xfs_errortag_attr *xfs_attr = to_attr(attr);
|
||||
int ret;
|
||||
unsigned int val;
|
||||
|
||||
if (strcmp(buf, "default") == 0) {
|
||||
val = xfs_errortag_random_default[xfs_attr->tag];
|
||||
} else {
|
||||
ret = kstrtouint(buf, 0, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = xfs_errortag_set(mp, xfs_attr->tag, val);
|
||||
if (ret)
|
||||
return ret;
|
||||
return count;
|
||||
}
|
||||
|
||||
STATIC ssize_t
|
||||
xfs_errortag_attr_show(
|
||||
struct kobject *kobject,
|
||||
struct attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct xfs_mount *mp = to_mp(kobject);
|
||||
struct xfs_errortag_attr *xfs_attr = to_attr(attr);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n",
|
||||
xfs_errortag_get(mp, xfs_attr->tag));
|
||||
}
|
||||
|
||||
static const struct sysfs_ops xfs_errortag_sysfs_ops = {
|
||||
.show = xfs_errortag_attr_show,
|
||||
.store = xfs_errortag_attr_store,
|
||||
};
|
||||
|
||||
#define XFS_ERRORTAG_ATTR_RW(_name, _tag) \
|
||||
static struct xfs_errortag_attr xfs_errortag_attr_##_name = { \
|
||||
.attr = {.name = __stringify(_name), \
|
||||
.mode = VERIFY_OCTAL_PERMISSIONS(S_IWUSR | S_IRUGO) }, \
|
||||
.tag = (_tag), \
|
||||
}
|
||||
|
||||
#define XFS_ERRORTAG_ATTR_LIST(_name) &xfs_errortag_attr_##_name.attr
|
||||
|
||||
XFS_ERRORTAG_ATTR_RW(noerror, XFS_ERRTAG_NOERROR);
|
||||
XFS_ERRORTAG_ATTR_RW(iflush1, XFS_ERRTAG_IFLUSH_1);
|
||||
XFS_ERRORTAG_ATTR_RW(iflush2, XFS_ERRTAG_IFLUSH_2);
|
||||
XFS_ERRORTAG_ATTR_RW(iflush3, XFS_ERRTAG_IFLUSH_3);
|
||||
XFS_ERRORTAG_ATTR_RW(iflush4, XFS_ERRTAG_IFLUSH_4);
|
||||
XFS_ERRORTAG_ATTR_RW(iflush5, XFS_ERRTAG_IFLUSH_5);
|
||||
XFS_ERRORTAG_ATTR_RW(iflush6, XFS_ERRTAG_IFLUSH_6);
|
||||
XFS_ERRORTAG_ATTR_RW(dareadbuf, XFS_ERRTAG_DA_READ_BUF);
|
||||
XFS_ERRORTAG_ATTR_RW(btree_chk_lblk, XFS_ERRTAG_BTREE_CHECK_LBLOCK);
|
||||
XFS_ERRORTAG_ATTR_RW(btree_chk_sblk, XFS_ERRTAG_BTREE_CHECK_SBLOCK);
|
||||
XFS_ERRORTAG_ATTR_RW(readagf, XFS_ERRTAG_ALLOC_READ_AGF);
|
||||
XFS_ERRORTAG_ATTR_RW(readagi, XFS_ERRTAG_IALLOC_READ_AGI);
|
||||
XFS_ERRORTAG_ATTR_RW(itobp, XFS_ERRTAG_ITOBP_INOTOBP);
|
||||
XFS_ERRORTAG_ATTR_RW(iunlink, XFS_ERRTAG_IUNLINK);
|
||||
XFS_ERRORTAG_ATTR_RW(iunlinkrm, XFS_ERRTAG_IUNLINK_REMOVE);
|
||||
XFS_ERRORTAG_ATTR_RW(dirinovalid, XFS_ERRTAG_DIR_INO_VALIDATE);
|
||||
XFS_ERRORTAG_ATTR_RW(bulkstat, XFS_ERRTAG_BULKSTAT_READ_CHUNK);
|
||||
XFS_ERRORTAG_ATTR_RW(logiodone, XFS_ERRTAG_IODONE_IOERR);
|
||||
XFS_ERRORTAG_ATTR_RW(stratread, XFS_ERRTAG_STRATREAD_IOERR);
|
||||
XFS_ERRORTAG_ATTR_RW(stratcmpl, XFS_ERRTAG_STRATCMPL_IOERR);
|
||||
XFS_ERRORTAG_ATTR_RW(diowrite, XFS_ERRTAG_DIOWRITE_IOERR);
|
||||
XFS_ERRORTAG_ATTR_RW(bmapifmt, XFS_ERRTAG_BMAPIFORMAT);
|
||||
XFS_ERRORTAG_ATTR_RW(free_extent, XFS_ERRTAG_FREE_EXTENT);
|
||||
XFS_ERRORTAG_ATTR_RW(rmap_finish_one, XFS_ERRTAG_RMAP_FINISH_ONE);
|
||||
XFS_ERRORTAG_ATTR_RW(refcount_continue_update, XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE);
|
||||
XFS_ERRORTAG_ATTR_RW(refcount_finish_one, XFS_ERRTAG_REFCOUNT_FINISH_ONE);
|
||||
XFS_ERRORTAG_ATTR_RW(bmap_finish_one, XFS_ERRTAG_BMAP_FINISH_ONE);
|
||||
XFS_ERRORTAG_ATTR_RW(ag_resv_critical, XFS_ERRTAG_AG_RESV_CRITICAL);
|
||||
XFS_ERRORTAG_ATTR_RW(drop_writes, XFS_ERRTAG_DROP_WRITES);
|
||||
XFS_ERRORTAG_ATTR_RW(log_bad_crc, XFS_ERRTAG_LOG_BAD_CRC);
|
||||
|
||||
static struct attribute *xfs_errortag_attrs[] = {
|
||||
XFS_ERRORTAG_ATTR_LIST(noerror),
|
||||
XFS_ERRORTAG_ATTR_LIST(iflush1),
|
||||
XFS_ERRORTAG_ATTR_LIST(iflush2),
|
||||
XFS_ERRORTAG_ATTR_LIST(iflush3),
|
||||
XFS_ERRORTAG_ATTR_LIST(iflush4),
|
||||
XFS_ERRORTAG_ATTR_LIST(iflush5),
|
||||
XFS_ERRORTAG_ATTR_LIST(iflush6),
|
||||
XFS_ERRORTAG_ATTR_LIST(dareadbuf),
|
||||
XFS_ERRORTAG_ATTR_LIST(btree_chk_lblk),
|
||||
XFS_ERRORTAG_ATTR_LIST(btree_chk_sblk),
|
||||
XFS_ERRORTAG_ATTR_LIST(readagf),
|
||||
XFS_ERRORTAG_ATTR_LIST(readagi),
|
||||
XFS_ERRORTAG_ATTR_LIST(itobp),
|
||||
XFS_ERRORTAG_ATTR_LIST(iunlink),
|
||||
XFS_ERRORTAG_ATTR_LIST(iunlinkrm),
|
||||
XFS_ERRORTAG_ATTR_LIST(dirinovalid),
|
||||
XFS_ERRORTAG_ATTR_LIST(bulkstat),
|
||||
XFS_ERRORTAG_ATTR_LIST(logiodone),
|
||||
XFS_ERRORTAG_ATTR_LIST(stratread),
|
||||
XFS_ERRORTAG_ATTR_LIST(stratcmpl),
|
||||
XFS_ERRORTAG_ATTR_LIST(diowrite),
|
||||
XFS_ERRORTAG_ATTR_LIST(bmapifmt),
|
||||
XFS_ERRORTAG_ATTR_LIST(free_extent),
|
||||
XFS_ERRORTAG_ATTR_LIST(rmap_finish_one),
|
||||
XFS_ERRORTAG_ATTR_LIST(refcount_continue_update),
|
||||
XFS_ERRORTAG_ATTR_LIST(refcount_finish_one),
|
||||
XFS_ERRORTAG_ATTR_LIST(bmap_finish_one),
|
||||
XFS_ERRORTAG_ATTR_LIST(ag_resv_critical),
|
||||
XFS_ERRORTAG_ATTR_LIST(drop_writes),
|
||||
XFS_ERRORTAG_ATTR_LIST(log_bad_crc),
|
||||
NULL,
|
||||
};
|
||||
|
||||
struct kobj_type xfs_errortag_ktype = {
|
||||
.release = xfs_sysfs_release,
|
||||
.sysfs_ops = &xfs_errortag_sysfs_ops,
|
||||
.default_attrs = xfs_errortag_attrs,
|
||||
};
|
||||
|
||||
int
|
||||
xfs_errortag_init(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX,
|
||||
KM_SLEEP | KM_MAYFAIL);
|
||||
if (!mp->m_errortag)
|
||||
return -ENOMEM;
|
||||
|
||||
return xfs_sysfs_init(&mp->m_errortag_kobj, &xfs_errortag_ktype,
|
||||
&mp->m_kobj, "errortag");
|
||||
}
|
||||
|
||||
void
|
||||
xfs_errortag_del(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
xfs_sysfs_del(&mp->m_errortag_kobj);
|
||||
kmem_free(mp->m_errortag);
|
||||
}
|
||||
|
||||
bool
|
||||
xfs_errortag_test(
|
||||
struct xfs_mount *mp,
|
||||
const char *expression,
|
||||
const char *file,
|
||||
int line,
|
||||
unsigned int error_tag)
|
||||
{
|
||||
unsigned int randfactor;
|
||||
|
||||
/*
|
||||
* To be able to use error injection anywhere, we need to ensure error
|
||||
* injection mechanism is already initialized.
|
||||
*
|
||||
* Code paths like I/O completion can be called before the
|
||||
* initialization is complete, but be able to inject errors in such
|
||||
* places is still useful.
|
||||
*/
|
||||
if (!mp->m_errortag)
|
||||
return false;
|
||||
|
||||
ASSERT(error_tag < XFS_ERRTAG_MAX);
|
||||
randfactor = mp->m_errortag[error_tag];
|
||||
if (!randfactor || prandom_u32() % randfactor)
|
||||
return false;
|
||||
|
||||
xfs_warn_ratelimited(mp,
|
||||
"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
|
||||
expression, file, line, mp->m_fsname);
|
||||
return true;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_errortag_get(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int error_tag)
|
||||
{
|
||||
if (error_tag >= XFS_ERRTAG_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
return mp->m_errortag[error_tag];
|
||||
}
|
||||
|
||||
int
|
||||
xfs_errortag_set(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int error_tag,
|
||||
unsigned int tag_value)
|
||||
{
|
||||
if (error_tag >= XFS_ERRTAG_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
mp->m_errortag[error_tag] = tag_value;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_errortag_add(unsigned int error_tag, xfs_mount_t *mp)
|
||||
xfs_errortag_add(
|
||||
struct xfs_mount *mp,
|
||||
unsigned int error_tag)
|
||||
{
|
||||
int i;
|
||||
int len;
|
||||
int64_t fsid;
|
||||
|
||||
if (error_tag >= XFS_ERRTAG_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
|
||||
|
||||
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
|
||||
if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
|
||||
xfs_warn(mp, "error tag #%d on", error_tag);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
|
||||
if (xfs_etest[i] == 0) {
|
||||
xfs_warn(mp, "Turned on XFS error tag #%d",
|
||||
error_tag);
|
||||
xfs_etest[i] = error_tag;
|
||||
xfs_etest_fsid[i] = fsid;
|
||||
len = strlen(mp->m_fsname);
|
||||
xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
|
||||
strcpy(xfs_etest_fsname[i], mp->m_fsname);
|
||||
xfs_error_test_active++;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
xfs_warn(mp, "error tag overflow, too many turned on");
|
||||
|
||||
return 1;
|
||||
return xfs_errortag_set(mp, error_tag,
|
||||
xfs_errortag_random_default[error_tag]);
|
||||
}
|
||||
|
||||
int
|
||||
xfs_errortag_clearall(xfs_mount_t *mp, int loud)
|
||||
xfs_errortag_clearall(
|
||||
struct xfs_mount *mp)
|
||||
{
|
||||
int64_t fsid;
|
||||
int cleared = 0;
|
||||
int i;
|
||||
|
||||
memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
|
||||
|
||||
|
||||
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
|
||||
if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) &&
|
||||
xfs_etest[i] != 0) {
|
||||
cleared = 1;
|
||||
xfs_warn(mp, "Clearing XFS error tag #%d",
|
||||
xfs_etest[i]);
|
||||
xfs_etest[i] = 0;
|
||||
xfs_etest_fsid[i] = 0LL;
|
||||
kmem_free(xfs_etest_fsname[i]);
|
||||
xfs_etest_fsname[i] = NULL;
|
||||
xfs_error_test_active--;
|
||||
}
|
||||
}
|
||||
|
||||
if (loud || cleared)
|
||||
xfs_warn(mp, "Cleared all XFS error tags for filesystem");
|
||||
|
||||
memset(mp->m_errortag, 0, sizeof(unsigned int) * XFS_ERRTAG_MAX);
|
||||
return 0;
|
||||
}
|
||||
#endif /* DEBUG */
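The attributes built above are exposed as writable files in a per-mount errortag directory in sysfs. A minimal sketch of arming one of them from user space; the /sys/fs/xfs/sdb1 path assumes a filesystem mounted from that device, and the knob only exists on DEBUG kernels:

#include <stdio.h>

/*
 * Arm the drop_writes error tag by writing a frequency to its sysfs knob:
 * 1 = trigger every time, 0 = off, and the string "default" restores the
 * tag's built-in XFS_RANDOM_* frequency.
 */
int main(void)
{
	/* "sdb1" is an assumed device name; substitute the real one. */
	const char *knob = "/sys/fs/xfs/sdb1/errortag/drop_writes";
	FILE *f = fopen(knob, "w");

	if (!f) {
		perror(knob);
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}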
|
||||
|
|
|
@@ -96,7 +96,17 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
 #define XFS_ERRTAG_REFCOUNT_FINISH_ONE			25
 #define XFS_ERRTAG_BMAP_FINISH_ONE			26
 #define XFS_ERRTAG_AG_RESV_CRITICAL			27
-#define XFS_ERRTAG_MAX					28
+/*
+ * DEBUG mode instrumentation to test and/or trigger delayed allocation
+ * block killing in the event of failed writes. When enabled, all
+ * buffered writes are silenty dropped and handled as if they failed.
+ * All delalloc blocks in the range of the write (including pre-existing
+ * delalloc blocks!) are tossed as part of the write failure error
+ * handling sequence.
+ */
+#define XFS_ERRTAG_DROP_WRITES				28
+#define XFS_ERRTAG_LOG_BAD_CRC				29
+#define XFS_ERRTAG_MAX					30
 
 /*
  * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -129,23 +139,29 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
 #define XFS_RANDOM_REFCOUNT_FINISH_ONE			1
 #define XFS_RANDOM_BMAP_FINISH_ONE			1
 #define XFS_RANDOM_AG_RESV_CRITICAL			4
+#define XFS_RANDOM_DROP_WRITES				1
+#define XFS_RANDOM_LOG_BAD_CRC				1
 
 #ifdef DEBUG
-extern int xfs_error_test_active;
-extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
-
-#define	XFS_NUM_INJECT_ERROR	10
-#define XFS_TEST_ERROR(expr, mp, tag, rf)		\
-	((expr) || (xfs_error_test_active && \
-	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
-			(rf))))
+extern int xfs_errortag_init(struct xfs_mount *mp);
+extern void xfs_errortag_del(struct xfs_mount *mp);
+extern bool xfs_errortag_test(struct xfs_mount *mp, const char *expression,
+		const char *file, int line, unsigned int error_tag);
+#define XFS_TEST_ERROR(expr, mp, tag)		\
+	((expr) || xfs_errortag_test((mp), #expr, __FILE__, __LINE__, (tag)))
 
-extern int xfs_errortag_add(unsigned int error_tag, struct xfs_mount *mp);
-extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
+extern int xfs_errortag_get(struct xfs_mount *mp, unsigned int error_tag);
+extern int xfs_errortag_set(struct xfs_mount *mp, unsigned int error_tag,
+		unsigned int tag_value);
+extern int xfs_errortag_add(struct xfs_mount *mp, unsigned int error_tag);
+extern int xfs_errortag_clearall(struct xfs_mount *mp);
 #else
-#define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
-#define xfs_errortag_add(tag, mp)		(ENOSYS)
-#define xfs_errortag_clearall(mp, loud)		(ENOSYS)
+#define xfs_errortag_init(mp)			(0)
+#define xfs_errortag_del(mp)
+#define XFS_TEST_ERROR(expr, mp, tag)		(expr)
+#define xfs_errortag_set(mp, tag, val)		(ENOSYS)
+#define xfs_errortag_add(mp, tag)		(ENOSYS)
+#define xfs_errortag_clearall(mp)		(ENOSYS)
 #endif /* DEBUG */
 
 /*
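The random factors listed above gate how often an armed tag fires: 1 fires every time, 2 about half the time, 0 never. A user-space sketch of the same gate, using rand() in place of the kernel's prandom_u32():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Mirror of the errortag gate: a tag fires when the random draw modulo
 * its factor is zero, so factor 1 fires always, 2 about half the time,
 * and 0 means the tag is disabled.
 */
static int tag_fires(unsigned int randfactor)
{
	if (!randfactor)
		return 0;
	return (rand() % randfactor) == 0;
}

int main(void)
{
	unsigned int factor = 4;	/* e.g. XFS_RANDOM_AG_RESV_CRITICAL */
	int hits = 0, trials = 100000;

	srand((unsigned int)time(NULL));
	for (int i = 0; i < trials; i++)
		hits += tag_fires(factor);
	printf("factor %u fired %d of %d times (~1/%u expected)\n",
	       factor, hits, trials, factor);
	return 0;
}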
@@ -679,6 +679,7 @@ xfs_file_buffered_aio_write(
 		xfs_iunlock(ip, iolock);
 		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
 		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
+		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
 		goto write_retry;
 	}
 
@ -970,362 +971,7 @@ xfs_file_readdir(
|
|||
*/
|
||||
bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
|
||||
|
||||
return xfs_readdir(ip, ctx, bufsize);
|
||||
}
|
||||
|
||||
/*
|
||||
* This type is designed to indicate the type of offset we would like
|
||||
* to search from page cache for xfs_seek_hole_data().
|
||||
*/
|
||||
enum {
|
||||
HOLE_OFF = 0,
|
||||
DATA_OFF,
|
||||
};
|
||||
|
||||
/*
|
||||
* Lookup the desired type of offset from the given page.
|
||||
*
|
||||
* On success, return true and the offset argument will point to the
|
||||
* start of the region that was found. Otherwise this function will
|
||||
* return false and keep the offset argument unchanged.
|
||||
*/
|
||||
STATIC bool
|
||||
xfs_lookup_buffer_offset(
|
||||
struct page *page,
|
||||
loff_t *offset,
|
||||
unsigned int type)
|
||||
{
|
||||
loff_t lastoff = page_offset(page);
|
||||
bool found = false;
|
||||
struct buffer_head *bh, *head;
|
||||
|
||||
bh = head = page_buffers(page);
|
||||
do {
|
||||
/*
|
||||
* Unwritten extents that have data in the page
|
||||
* cache covering them can be identified by the
|
||||
* BH_Unwritten state flag. Pages with multiple
|
||||
* buffers might have a mix of holes, data and
|
||||
* unwritten extents - any buffer with valid
|
||||
* data in it should have BH_Uptodate flag set
|
||||
* on it.
|
||||
*/
|
||||
if (buffer_unwritten(bh) ||
|
||||
buffer_uptodate(bh)) {
|
||||
if (type == DATA_OFF)
|
||||
found = true;
|
||||
} else {
|
||||
if (type == HOLE_OFF)
|
||||
found = true;
|
||||
}
|
||||
|
||||
if (found) {
|
||||
*offset = lastoff;
|
||||
break;
|
||||
}
|
||||
lastoff += bh->b_size;
|
||||
} while ((bh = bh->b_this_page) != head);
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine is called to find out and return a data or hole offset
|
||||
* from the page cache for unwritten extents according to the desired
|
||||
* type for xfs_seek_hole_data().
|
||||
*
|
||||
* The argument offset is used to tell where we start to search from the
|
||||
* page cache. Map is used to figure out the end points of the range to
|
||||
* lookup pages.
|
||||
*
|
||||
* Return true if the desired type of offset was found, and the argument
|
||||
* offset is filled with that address. Otherwise, return false and keep
|
||||
* offset unchanged.
|
||||
*/
|
||||
STATIC bool
|
||||
xfs_find_get_desired_pgoff(
|
||||
struct inode *inode,
|
||||
struct xfs_bmbt_irec *map,
|
||||
unsigned int type,
|
||||
loff_t *offset)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
struct pagevec pvec;
|
||||
pgoff_t index;
|
||||
pgoff_t end;
|
||||
loff_t endoff;
|
||||
loff_t startoff = *offset;
|
||||
loff_t lastoff = startoff;
|
||||
bool found = false;
|
||||
|
||||
pagevec_init(&pvec, 0);
|
||||
|
||||
index = startoff >> PAGE_SHIFT;
|
||||
endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
|
||||
end = (endoff - 1) >> PAGE_SHIFT;
|
||||
do {
|
||||
int want;
|
||||
unsigned nr_pages;
|
||||
unsigned int i;
|
||||
|
||||
want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
|
||||
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
|
||||
want);
|
||||
if (nr_pages == 0)
|
||||
break;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
struct page *page = pvec.pages[i];
|
||||
loff_t b_offset;
|
||||
|
||||
/*
|
||||
* At this point, the page may be truncated or
|
||||
* invalidated (changing page->mapping to NULL),
|
||||
* or even swizzled back from swapper_space to tmpfs
|
||||
* file mapping. However, page->index will not change
|
||||
* because we have a reference on the page.
|
||||
*
|
||||
* If current page offset is beyond where we've ended,
|
||||
* we've found a hole.
|
||||
*/
|
||||
if (type == HOLE_OFF && lastoff < endoff &&
|
||||
lastoff < page_offset(pvec.pages[i])) {
|
||||
found = true;
|
||||
*offset = lastoff;
|
||||
goto out;
|
||||
}
|
||||
/* Searching done if the page index is out of range. */
|
||||
if (page->index > end)
|
||||
goto out;
|
||||
|
||||
lock_page(page);
|
||||
/*
|
||||
* Page truncated or invalidated(page->mapping == NULL).
|
||||
* We can freely skip it and proceed to check the next
|
||||
* page.
|
||||
*/
|
||||
if (unlikely(page->mapping != inode->i_mapping)) {
|
||||
unlock_page(page);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!page_has_buffers(page)) {
|
||||
unlock_page(page);
|
||||
continue;
|
||||
}
|
||||
|
||||
found = xfs_lookup_buffer_offset(page, &b_offset, type);
|
||||
if (found) {
|
||||
/*
|
||||
* The found offset may be less than the start
|
||||
* point to search if this is the first time to
|
||||
* come here.
|
||||
*/
|
||||
*offset = max_t(loff_t, startoff, b_offset);
|
||||
unlock_page(page);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* We either searching data but nothing was found, or
|
||||
* searching hole but found a data buffer. In either
|
||||
* case, probably the next page contains the desired
|
||||
* things, update the last offset to it so.
|
||||
*/
|
||||
lastoff = page_offset(page) + PAGE_SIZE;
|
||||
unlock_page(page);
|
||||
}
|
||||
|
||||
/*
|
||||
* The number of returned pages less than our desired, search
|
||||
* done.
|
||||
*/
|
||||
if (nr_pages < want)
|
||||
break;
|
||||
|
||||
index = pvec.pages[i - 1]->index + 1;
|
||||
pagevec_release(&pvec);
|
||||
} while (index <= end);
|
||||
|
||||
/* No page at lastoff and we are not done - we found a hole. */
|
||||
if (type == HOLE_OFF && lastoff < endoff) {
|
||||
*offset = lastoff;
|
||||
found = true;
|
||||
}
|
||||
out:
|
||||
pagevec_release(&pvec);
|
||||
return found;
|
||||
}
|
||||
|
||||
/*
|
||||
* caller must lock inode with xfs_ilock_data_map_shared,
|
||||
* can we craft an appropriate ASSERT?
|
||||
*
|
||||
* end is because the VFS-level lseek interface is defined such that any
|
||||
* offset past i_size shall return -ENXIO, but we use this for quota code
|
||||
* which does not maintain i_size, and we want to SEEK_DATA past i_size.
|
||||
*/
|
||||
loff_t
|
||||
__xfs_seek_hole_data(
|
||||
struct inode *inode,
|
||||
loff_t start,
|
||||
loff_t end,
|
||||
int whence)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
loff_t uninitialized_var(offset);
|
||||
xfs_fileoff_t fsbno;
|
||||
xfs_filblks_t lastbno;
|
||||
int error;
|
||||
|
||||
if (start >= end) {
|
||||
error = -ENXIO;
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to read extents from the first block indicated
|
||||
* by fsbno to the end block of the file.
|
||||
*/
|
||||
fsbno = XFS_B_TO_FSBT(mp, start);
|
||||
lastbno = XFS_B_TO_FSB(mp, end);
|
||||
|
||||
for (;;) {
|
||||
struct xfs_bmbt_irec map[2];
|
||||
int nmap = 2;
|
||||
unsigned int i;
|
||||
|
||||
error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
|
||||
XFS_BMAPI_ENTIRE);
|
||||
if (error)
|
||||
goto out_error;
|
||||
|
||||
/* No extents at given offset, must be beyond EOF */
|
||||
if (nmap == 0) {
|
||||
error = -ENXIO;
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
for (i = 0; i < nmap; i++) {
|
||||
offset = max_t(loff_t, start,
|
||||
XFS_FSB_TO_B(mp, map[i].br_startoff));
|
||||
|
||||
/* Landed in the hole we wanted? */
|
||||
if (whence == SEEK_HOLE &&
|
||||
map[i].br_startblock == HOLESTARTBLOCK)
|
||||
goto out;
|
||||
|
||||
/* Landed in the data extent we wanted? */
|
||||
if (whence == SEEK_DATA &&
|
||||
(map[i].br_startblock == DELAYSTARTBLOCK ||
|
||||
(map[i].br_state == XFS_EXT_NORM &&
|
||||
!isnullstartblock(map[i].br_startblock))))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Landed in an unwritten extent, try to search
|
||||
* for hole or data from page cache.
|
||||
*/
|
||||
if (map[i].br_state == XFS_EXT_UNWRITTEN) {
|
||||
if (xfs_find_get_desired_pgoff(inode, &map[i],
|
||||
whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
|
||||
&offset))
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We only received one extent out of the two requested. This
|
||||
* means we've hit EOF and didn't find what we are looking for.
|
||||
*/
|
||||
if (nmap == 1) {
|
||||
/*
|
||||
* If we were looking for a hole, set offset to
|
||||
* the end of the file (i.e., there is an implicit
|
||||
* hole at the end of any file).
|
||||
*/
|
||||
if (whence == SEEK_HOLE) {
|
||||
offset = end;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* If we were looking for data, it's nowhere to be found
|
||||
*/
|
||||
ASSERT(whence == SEEK_DATA);
|
||||
error = -ENXIO;
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
ASSERT(i > 1);
|
||||
|
||||
/*
|
||||
* Nothing was found, proceed to the next round of search
|
||||
* if the next reading offset is not at or beyond EOF.
|
||||
*/
|
||||
fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
|
||||
start = XFS_FSB_TO_B(mp, fsbno);
|
||||
if (start >= end) {
|
||||
if (whence == SEEK_HOLE) {
|
||||
offset = end;
|
||||
break;
|
||||
}
|
||||
ASSERT(whence == SEEK_DATA);
|
||||
error = -ENXIO;
|
||||
goto out_error;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
/*
|
||||
* If at this point we have found the hole we wanted, the returned
|
||||
* offset may be bigger than the file size as it may be aligned to
|
||||
* page boundary for unwritten extents. We need to deal with this
|
||||
* situation in particular.
|
||||
*/
|
||||
if (whence == SEEK_HOLE)
|
||||
offset = min_t(loff_t, offset, end);
|
||||
|
||||
return offset;
|
||||
|
||||
out_error:
|
||||
return error;
|
||||
}
|
||||
|
||||
STATIC loff_t
|
||||
xfs_seek_hole_data(
|
||||
struct file *file,
|
||||
loff_t start,
|
||||
int whence)
|
||||
{
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
uint lock;
|
||||
loff_t offset, end;
|
||||
int error = 0;
|
||||
|
||||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -EIO;
|
||||
|
||||
lock = xfs_ilock_data_map_shared(ip);
|
||||
|
||||
end = i_size_read(inode);
|
||||
offset = __xfs_seek_hole_data(inode, start, end, whence);
|
||||
if (offset < 0) {
|
||||
error = offset;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
|
||||
|
||||
out_unlock:
|
||||
xfs_iunlock(ip, lock);
|
||||
|
||||
if (error)
|
||||
return error;
|
||||
return offset;
|
||||
return xfs_readdir(NULL, ip, ctx, bufsize);
|
||||
}
|
||||
|
||||
STATIC loff_t
|
||||
|
@@ -1334,17 +980,25 @@ xfs_file_llseek(
 	loff_t		offset,
 	int		whence)
 {
+	struct inode		*inode = file->f_mapping->host;
+
+	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
+		return -EIO;
+
 	switch (whence) {
-	case SEEK_END:
-	case SEEK_CUR:
-	case SEEK_SET:
+	default:
 		return generic_file_llseek(file, offset, whence);
 	case SEEK_HOLE:
+		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
+		break;
 	case SEEK_DATA:
-		return xfs_seek_hole_data(file, offset, whence);
-	default:
-		return -EINVAL;
+		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
+		break;
 	}
+
+	if (offset < 0)
+		return offset;
+	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 }
 
 /*
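With SEEK_HOLE and SEEK_DATA now served by the iomap helpers, the user-visible lseek() semantics stay the same. A small sketch that probes a file's first data extent and the hole after it; the file path is illustrative:

#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	const char *path = "/mnt/xfs/sparse-file";	/* assumed test file */
	int fd = open(path, O_RDONLY);
	off_t data, hole;

	if (fd < 0) {
		perror(path);
		return 1;
	}
	data = lseek(fd, 0, SEEK_DATA);		/* first byte of data at or after offset 0 */
	if (data < 0) {
		perror("SEEK_DATA");		/* ENXIO: no data at or beyond that offset */
	} else {
		hole = lseek(fd, data, SEEK_HOLE);	/* first hole at or after that data */
		if (hole < 0)
			perror("SEEK_HOLE");
		else
			printf("data starts at %lld, next hole at %lld\n",
			       (long long)data, (long long)hole);
	}
	close(fd);
	return 0;
}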
@@ -602,7 +602,7 @@ xfs_growfs_data_private(
 	if (nagimax)
 		mp->m_maxagi = nagimax;
 	if (mp->m_sb.sb_imax_pct) {
-		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
+		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
 		do_div(icount, 100);
 		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
 	} else
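The hunk above recomputes the mount's maximum inode count after a grow: scale the data block count by the imaxpct percentage, then convert blocks to inodes by shifting by sb_inopblog (log2 of inodes per block). A standalone sketch of the same arithmetic with assumed geometry:

#include <stdint.h>
#include <stdio.h>

/* Same computation as the kernel hunk, with illustrative filesystem geometry. */
int main(void)
{
	uint64_t dblocks = 26214400;	/* 100 GiB worth of 4 KiB blocks (assumed) */
	uint64_t imax_pct = 25;		/* assumed imaxpct value */
	unsigned int inopblog = 4;	/* log2(16 inodes per 4 KiB block at 256-byte inodes) */
	uint64_t icount;

	icount = dblocks * imax_pct;
	icount /= 100;			/* user-space stand-in for do_div(icount, 100) */
	printf("m_maxicount = %llu inodes\n",
	       (unsigned long long)(icount << inopblog));
	return 0;
}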
@@ -793,17 +793,17 @@ xfs_fs_counts(
 int
 xfs_reserve_blocks(
 	xfs_mount_t		*mp,
-	__uint64_t		*inval,
+	uint64_t		*inval,
 	xfs_fsop_resblks_t	*outval)
 {
-	__int64_t		lcounter, delta;
-	__int64_t		fdblks_delta = 0;
-	__uint64_t		request;
-	__int64_t		free;
+	int64_t			lcounter, delta;
+	int64_t			fdblks_delta = 0;
+	uint64_t		request;
+	int64_t			free;
 	int			error = 0;
 
 	/* If inval is null, report current values and return */
-	if (inval == (__uint64_t *)NULL) {
+	if (inval == (uint64_t *)NULL) {
 		if (!outval)
 			return -EINVAL;
 		outval->resblks = mp->m_resblks;
@@ -904,7 +904,7 @@ xfs_reserve_blocks(
 int
 xfs_fs_goingdown(
 	xfs_mount_t	*mp,
-	__uint32_t	inflags)
+	uint32_t	inflags)
 {
 	switch (inflags) {
 	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
@@ -22,9 +22,9 @@ extern int xfs_fs_geometry(xfs_mount_t *mp, xfs_fsop_geom_t *geo, int nversion);
 extern int xfs_growfs_data(xfs_mount_t *mp, xfs_growfs_data_t *in);
 extern int xfs_growfs_log(xfs_mount_t *mp, xfs_growfs_log_t *in);
 extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
-extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
+extern int xfs_reserve_blocks(xfs_mount_t *mp, uint64_t *inval,
 				xfs_fsop_resblks_t *outval);
-extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
+extern int xfs_fs_goingdown(xfs_mount_t *mp, uint32_t inflags);
 
 extern int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp);
 extern int xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
@@ -47,4 +47,9 @@ xfs_param_t xfs_params = {
 
 struct xfs_globals xfs_globals = {
 	.log_recovery_delay	=	0,	/* no delay by default */
+#ifdef XFS_ASSERT_FATAL
+	.bug_on_assert		=	true,	/* assert failures BUG() */
+#else
+	.bug_on_assert		=	false,	/* assert failures WARN() */
+#endif
 };
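bug_on_assert selects whether a failed ASSERT brings the machine down or merely warns and continues. A simplified user-space model of that dispatch, with abort() standing in for BUG() and a message standing in for WARN(); the helper names are illustrative, not the kernel's assfail code:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* User-space stand-in for xfs_globals.bug_on_assert. */
static bool bug_on_assert = false;

/* Simplified model: fatal path aborts, non-fatal path only reports. */
static void xfs_assert_failed(const char *expr, const char *file, int line)
{
	fprintf(stderr, "Assertion failed: %s, file: %s, line: %d\n",
		expr, file, line);
	if (bug_on_assert)
		abort();	/* the kernel would BUG() here */
	/* otherwise carry on; the kernel would WARN() and continue */
}

#define ASSERT(expr) \
	((expr) ? (void)0 : xfs_assert_failed(#expr, __FILE__, __LINE__))

int main(void)
{
	ASSERT(1 + 1 == 3);	/* warns but keeps running */
	bug_on_assert = true;
	ASSERT(1 + 1 == 3);	/* now aborts */
	return 0;
}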
@ -368,6 +368,11 @@ xfs_iget_cache_hit(
|
|||
if (ip->i_flags & XFS_IRECLAIMABLE) {
|
||||
trace_xfs_iget_reclaim(ip);
|
||||
|
||||
if (flags & XFS_IGET_INCORE) {
|
||||
error = -EAGAIN;
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
/*
|
||||
* We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
|
||||
* from stomping over us while we recycle the inode. We can't
|
||||
|
@ -432,7 +437,8 @@ xfs_iget_cache_hit(
|
|||
if (lock_flags != 0)
|
||||
xfs_ilock(ip, lock_flags);
|
||||
|
||||
xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
|
||||
if (!(flags & XFS_IGET_INCORE))
|
||||
xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
|
||||
XFS_STATS_INC(mp, xs_ig_found);
|
||||
|
||||
return 0;
|
||||
|
@ -603,6 +609,10 @@ xfs_iget(
|
|||
goto out_error_or_again;
|
||||
} else {
|
||||
rcu_read_unlock();
|
||||
if (flags & XFS_IGET_INCORE) {
|
||||
error = -ENOENT;
|
||||
goto out_error_or_again;
|
||||
}
|
||||
XFS_STATS_INC(mp, xs_ig_missed);
|
||||
|
||||
error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
|
||||
|
@ -623,7 +633,7 @@ xfs_iget(
|
|||
return 0;
|
||||
|
||||
out_error_or_again:
|
||||
if (error == -EAGAIN) {
|
||||
if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
|
||||
delay(1);
|
||||
goto again;
|
||||
}
|
||||
|
@@ -631,6 +641,44 @@ xfs_iget(
 	return error;
 }
 
+/*
+ * "Is this a cached inode that's also allocated?"
+ *
+ * Look up an inode by number in the given file system.  If the inode is
+ * in cache and isn't in purgatory, return 1 if the inode is allocated
+ * and 0 if it is not.  For all other cases (not in cache, being torn
+ * down, etc.), return a negative error code.
+ *
+ * The caller has to prevent inode allocation and freeing activity,
+ * presumably by locking the AGI buffer.  This is to ensure that an
+ * inode cannot transition from allocated to freed until the caller is
+ * ready to allow that.  If the inode is in an intermediate state (new,
+ * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
+ * inode is not in the cache, -ENOENT will be returned.  The caller must
+ * deal with these scenarios appropriately.
+ *
+ * This is a specialized use case for the online scrubber; if you're
+ * reading this, you probably want xfs_iget.
+ */
+int
+xfs_icache_inode_is_allocated(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	xfs_ino_t		ino,
+	bool			*inuse)
+{
+	struct xfs_inode	*ip;
+	int			error;
+
+	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
+	if (error)
+		return error;
+
+	*inuse = !!(VFS_I(ip)->i_mode);
+	IRELE(ip);
+	return 0;
+}
+
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -47,6 +47,7 @@ struct xfs_eofblocks {
 #define XFS_IGET_CREATE		0x1
 #define XFS_IGET_UNTRUSTED	0x2
 #define XFS_IGET_DONTCACHE	0x4
+#define XFS_IGET_INCORE		0x8 /* don't read from disk or reinit */
 
 /*
  * flags for AG inode iterator
@@ -126,4 +127,7 @@ xfs_fs_eofblocks_from_user(
 	return 0;
 }
 
+int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
+				  xfs_ino_t ino, bool *inuse);
+
 #endif
|
||||
|
|
|
@ -632,7 +632,7 @@ __xfs_iflock(
|
|||
|
||||
STATIC uint
|
||||
_xfs_dic2xflags(
|
||||
__uint16_t di_flags,
|
||||
uint16_t di_flags,
|
||||
uint64_t di_flags2,
|
||||
bool has_attr)
|
||||
{
|
||||
|
@ -855,8 +855,8 @@ xfs_ialloc(
|
|||
inode->i_version = 1;
|
||||
ip->i_d.di_flags2 = 0;
|
||||
ip->i_d.di_cowextsize = 0;
|
||||
ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
|
||||
ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
|
||||
ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
|
||||
ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
|
||||
}
|
||||
|
||||
|
||||
|
@ -3489,7 +3489,7 @@ xfs_iflush_int(
|
|||
dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
|
||||
|
||||
if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
|
||||
mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
|
||||
mp, XFS_ERRTAG_IFLUSH_1)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
|
||||
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
|
||||
|
@ -3499,7 +3499,7 @@ xfs_iflush_int(
|
|||
if (XFS_TEST_ERROR(
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
|
||||
mp, XFS_ERRTAG_IFLUSH_3)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: Bad regular inode %Lu, ptr 0x%p",
|
||||
__func__, ip->i_ino, ip);
|
||||
|
@ -3510,7 +3510,7 @@ xfs_iflush_int(
|
|||
(ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
|
||||
mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
|
||||
mp, XFS_ERRTAG_IFLUSH_4)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: Bad directory inode %Lu, ptr 0x%p",
|
||||
__func__, ip->i_ino, ip);
|
||||
|
@ -3518,8 +3518,7 @@ xfs_iflush_int(
|
|||
}
|
||||
}
|
||||
if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
|
||||
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
|
||||
XFS_RANDOM_IFLUSH_5)) {
|
||||
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: detected corrupt incore inode %Lu, "
|
||||
"total extents = %d, nblocks = %Ld, ptr 0x%p",
|
||||
|
@ -3529,7 +3528,7 @@ xfs_iflush_int(
|
|||
goto corrupt_out;
|
||||
}
|
||||
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
|
||||
mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
|
||||
mp, XFS_ERRTAG_IFLUSH_6)) {
|
||||
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
|
||||
"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
|
||||
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
|
||||
|
|
|
@ -192,8 +192,8 @@ static inline void
|
|||
xfs_set_projid(struct xfs_inode *ip,
|
||||
prid_t projid)
|
||||
{
|
||||
ip->i_d.di_projid_hi = (__uint16_t) (projid >> 16);
|
||||
ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff);
|
||||
ip->i_d.di_projid_hi = (uint16_t) (projid >> 16);
|
||||
ip->i_d.di_projid_lo = (uint16_t) (projid & 0xffff);
|
||||
}
|
||||
|
||||
static inline prid_t
|
||||
|
@ -445,9 +445,6 @@ int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
|
|||
xfs_fsize_t isize, bool *did_zeroing);
|
||||
int xfs_zero_range(struct xfs_inode *ip, xfs_off_t pos, xfs_off_t count,
|
||||
bool *did_zero);
|
||||
loff_t __xfs_seek_hole_data(struct inode *inode, loff_t start,
|
||||
loff_t eof, int whence);
|
||||
|
||||
|
||||
/* from xfs_iops.c */
|
||||
extern void xfs_setup_inode(struct xfs_inode *ip);
|
||||
|
|
|
@ -120,8 +120,7 @@ xfs_find_handle(
|
|||
handle.ha_fid.fid_pad = 0;
|
||||
handle.ha_fid.fid_gen = inode->i_generation;
|
||||
handle.ha_fid.fid_ino = ip->i_ino;
|
||||
|
||||
hsize = XFS_HSIZE(handle);
|
||||
hsize = sizeof(xfs_handle_t);
|
||||
}
|
||||
|
||||
error = -EFAULT;
|
||||
|
@ -444,8 +443,8 @@ xfs_attrmulti_attr_get(
|
|||
struct inode *inode,
|
||||
unsigned char *name,
|
||||
unsigned char __user *ubuf,
|
||||
__uint32_t *len,
|
||||
__uint32_t flags)
|
||||
uint32_t *len,
|
||||
uint32_t flags)
|
||||
{
|
||||
unsigned char *kbuf;
|
||||
int error = -EFAULT;
|
||||
|
@ -473,8 +472,8 @@ xfs_attrmulti_attr_set(
|
|||
struct inode *inode,
|
||||
unsigned char *name,
|
||||
const unsigned char __user *ubuf,
|
||||
__uint32_t len,
|
||||
__uint32_t flags)
|
||||
uint32_t len,
|
||||
uint32_t flags)
|
||||
{
|
||||
unsigned char *kbuf;
|
||||
int error;
|
||||
|
@ -499,7 +498,7 @@ int
|
|||
xfs_attrmulti_attr_remove(
|
||||
struct inode *inode,
|
||||
unsigned char *name,
|
||||
__uint32_t flags)
|
||||
uint32_t flags)
|
||||
{
|
||||
int error;
|
||||
|
||||
|
@ -877,7 +876,7 @@ xfs_merge_ioc_xflags(
|
|||
|
||||
STATIC unsigned int
|
||||
xfs_di2lxflags(
|
||||
__uint16_t di_flags)
|
||||
uint16_t di_flags)
|
||||
{
|
||||
unsigned int flags = 0;
|
||||
|
||||
|
@ -1288,7 +1287,7 @@ xfs_ioctl_setattr_check_projid(
|
|||
struct fsxattr *fa)
|
||||
{
|
||||
/* Disallow 32bit project ids if projid32bit feature is not enabled. */
|
||||
if (fa->fsx_projid > (__uint16_t)-1 &&
|
||||
if (fa->fsx_projid > (uint16_t)-1 &&
|
||||
!xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -1932,7 +1931,7 @@ xfs_file_ioctl(
|
|||
|
||||
case XFS_IOC_SET_RESBLKS: {
|
||||
xfs_fsop_resblks_t inout;
|
||||
__uint64_t in;
|
||||
uint64_t in;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
@ -2018,12 +2017,12 @@ xfs_file_ioctl(
|
|||
}
|
||||
|
||||
case XFS_IOC_GOINGDOWN: {
|
||||
__uint32_t in;
|
||||
uint32_t in;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
if (get_user(in, (__uint32_t __user *)arg))
|
||||
if (get_user(in, (uint32_t __user *)arg))
|
||||
return -EFAULT;
|
||||
|
||||
return xfs_fs_goingdown(mp, in);
|
||||
|
@ -2038,14 +2037,14 @@ xfs_file_ioctl(
|
|||
if (copy_from_user(&in, arg, sizeof(in)))
|
||||
return -EFAULT;
|
||||
|
||||
return xfs_errortag_add(in.errtag, mp);
|
||||
return xfs_errortag_add(mp, in.errtag);
|
||||
}
|
||||
|
||||
case XFS_IOC_ERROR_CLEARALL:
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
return xfs_errortag_clearall(mp, 1);
|
||||
return xfs_errortag_clearall(mp);
|
||||
|
||||
case XFS_IOC_FREE_EOFBLOCKS: {
|
||||
struct xfs_fs_eofblocks eofb;
|
||||
|
|
|
@ -48,22 +48,22 @@ xfs_attrmulti_attr_get(
|
|||
struct inode *inode,
|
||||
unsigned char *name,
|
||||
unsigned char __user *ubuf,
|
||||
__uint32_t *len,
|
||||
__uint32_t flags);
|
||||
uint32_t *len,
|
||||
uint32_t flags);
|
||||
|
||||
extern int
|
||||
xfs_attrmulti_attr_set(
|
||||
struct inode *inode,
|
||||
unsigned char *name,
|
||||
const unsigned char __user *ubuf,
|
||||
__uint32_t len,
|
||||
__uint32_t flags);
|
||||
uint32_t len,
|
||||
uint32_t flags);
|
||||
|
||||
extern int
|
||||
xfs_attrmulti_attr_remove(
|
||||
struct inode *inode,
|
||||
unsigned char *name,
|
||||
__uint32_t flags);
|
||||
uint32_t flags);
|
||||
|
||||
extern struct dentry *
|
||||
xfs_handle_to_dentry(
|
||||
|
|
|
@ -112,9 +112,9 @@ typedef struct compat_xfs_fsop_handlereq {
|
|||
|
||||
/* The bstat field in the swapext struct needs translation */
|
||||
typedef struct compat_xfs_swapext {
|
||||
__int64_t sx_version; /* version */
|
||||
__int64_t sx_fdtarget; /* fd of target file */
|
||||
__int64_t sx_fdtmp; /* fd of tmp file */
|
||||
int64_t sx_version; /* version */
|
||||
int64_t sx_fdtarget; /* fd of target file */
|
||||
int64_t sx_fdtmp; /* fd of tmp file */
|
||||
xfs_off_t sx_offset; /* offset into file */
|
||||
xfs_off_t sx_length; /* leng from offset */
|
||||
char sx_pad[16]; /* pad space, unused */
|
||||
|
|
|
@ -543,7 +543,7 @@ xfs_file_iomap_begin_delay(
|
|||
if (unlikely(XFS_TEST_ERROR(
|
||||
(XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
|
||||
XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
|
||||
mp, XFS_ERRTAG_BMAPIFORMAT))) {
|
||||
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
|
||||
error = -EFSCORRUPTED;
|
||||
goto out_unlock;
|
||||
|
@ -1119,7 +1119,7 @@ xfs_file_iomap_end_delalloc(
|
|||
* Behave as if the write failed if drop writes is enabled. Set the NEW
|
||||
* flag to force delalloc cleanup.
|
||||
*/
|
||||
if (xfs_mp_drop_writes(mp)) {
|
||||
if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
|
||||
iomap->flags |= IOMAP_F_NEW;
|
||||
written = 0;
|
||||
}
|
||||
|
|
|
@@ -190,12 +190,12 @@ xfs_generic_create(
 
 #ifdef CONFIG_XFS_POSIX_ACL
 	if (default_acl) {
-		error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
 		if (error)
 			goto out_cleanup_inode;
 	}
 	if (acl) {
-		error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
 		if (error)
 			goto out_cleanup_inode;
 	}
@@ -460,7 +460,7 @@ xfs_vn_get_link(
 	if (!dentry)
 		return ERR_PTR(-ECHILD);
 
-	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
+	link = kmalloc(XFS_SYMLINK_MAXLEN+1, GFP_KERNEL);
 	if (!link)
 		goto out_err;
 
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
#include "xfs_trace.h"
|
||||
#include "xfs_icache.h"
|
||||
|
||||
STATIC int
|
||||
int
|
||||
xfs_internal_inum(
|
||||
xfs_mount_t *mp,
|
||||
xfs_ino_t ino)
|
||||
|
|
|
@ -96,4 +96,6 @@ xfs_inumbers(
|
|||
void __user *buffer, /* buffer with inode info */
|
||||
inumbers_fmt_pf formatter);
|
||||
|
||||
int xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
|
||||
|
||||
#endif /* __XFS_ITABLE_H__ */
|
||||
|
|
|
@@ -24,14 +24,6 @@
 /*
  * Kernel specific type declarations for XFS
  */
-typedef signed char		__int8_t;
-typedef unsigned char		__uint8_t;
-typedef signed short int	__int16_t;
-typedef unsigned short int	__uint16_t;
-typedef signed int		__int32_t;
-typedef unsigned int		__uint32_t;
-typedef signed long long int	__int64_t;
-typedef unsigned long long int	__uint64_t;
 
 typedef __s64			xfs_off_t;	/* <file offset> type */
 typedef unsigned long long	xfs_ino_t;	/* <inode> type */
@@ -151,7 +143,6 @@ typedef __u32			xfs_nlink_t;
 #define __return_address __builtin_return_address(0)
 
 #define XFS_PROJID_DEFAULT	0
-#define MAXPATHLEN	1024
 
 #define MIN(a,b)	(min(a,b))
 #define MAX(a,b)	(max(a,b))
|
||||
|
@ -186,22 +177,22 @@ extern struct xstats xfsstats;
|
|||
* are converting to the init_user_ns. The uid is later mapped to a particular
|
||||
* user namespace value when crossing the kernel/user boundary.
|
||||
*/
|
||||
static inline __uint32_t xfs_kuid_to_uid(kuid_t uid)
|
||||
static inline uint32_t xfs_kuid_to_uid(kuid_t uid)
|
||||
{
|
||||
return from_kuid(&init_user_ns, uid);
|
||||
}
|
||||
|
||||
static inline kuid_t xfs_uid_to_kuid(__uint32_t uid)
|
||||
static inline kuid_t xfs_uid_to_kuid(uint32_t uid)
|
||||
{
|
||||
return make_kuid(&init_user_ns, uid);
|
||||
}
|
||||
|
||||
static inline __uint32_t xfs_kgid_to_gid(kgid_t gid)
|
||||
static inline uint32_t xfs_kgid_to_gid(kgid_t gid)
|
||||
{
|
||||
return from_kgid(&init_user_ns, gid);
|
||||
}
|
||||
|
||||
static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
|
||||
static inline kgid_t xfs_gid_to_kgid(uint32_t gid)
|
||||
{
|
||||
return make_kgid(&init_user_ns, gid);
|
||||
}
|
||||
|
@@ -231,14 +222,14 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
 
 #define do_mod(a, b)	xfs_do_mod(&(a), (b), sizeof(a))
 
-static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
+static inline uint64_t roundup_64(uint64_t x, uint32_t y)
 {
 	x += y - 1;
 	do_div(x, y);
 	return x * y;
 }
 
-static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+static inline uint64_t howmany_64(uint64_t x, uint32_t y)
 {
 	x += y - 1;
 	do_div(x, y);
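roundup_64() rounds x up to the next multiple of y and howmany_64() counts how many y-sized units are needed to cover x; the kernel versions go through do_div() because plain 64-bit division is not available on all 32-bit builds. A user-space sketch of the same helpers with ordinary division:

#include <stdint.h>
#include <stdio.h>

/* User-space equivalents of the kernel helpers above. */
static inline uint64_t roundup_64(uint64_t x, uint32_t y)
{
	x += y - 1;
	x /= y;			/* do_div(x, y) in the kernel version */
	return x * y;
}

static inline uint64_t howmany_64(uint64_t x, uint32_t y)
{
	x += y - 1;
	x /= y;
	return x;
}

int main(void)
{
	/* e.g. round a byte count up to whole 4096-byte filesystem blocks */
	printf("%llu\n", (unsigned long long)roundup_64(10000, 4096));	/* 12288 */
	printf("%llu\n", (unsigned long long)howmany_64(10000, 4096));	/* 3 */
	return 0;
}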
@ -434,7 +434,7 @@ xfs_log_reserve(
|
|||
int unit_bytes,
|
||||
int cnt,
|
||||
struct xlog_ticket **ticp,
|
||||
__uint8_t client,
|
||||
uint8_t client,
|
||||
bool permanent)
|
||||
{
|
||||
struct xlog *log = mp->m_log;
|
||||
|
@ -825,9 +825,9 @@ xfs_log_unmount_write(xfs_mount_t *mp)
|
|||
if (!error) {
|
||||
/* the data section must be 32 bit size aligned */
|
||||
struct {
|
||||
__uint16_t magic;
|
||||
__uint16_t pad1;
|
||||
__uint32_t pad2; /* may as well make it 64 bits */
|
||||
uint16_t magic;
|
||||
uint16_t pad1;
|
||||
uint32_t pad2; /* may as well make it 64 bits */
|
||||
} magic = {
|
||||
.magic = XLOG_UNMOUNT_TYPE,
|
||||
};
|
||||
|
@ -1189,8 +1189,7 @@ xlog_iodone(xfs_buf_t *bp)
|
|||
* IOABORT state. The IOABORT state is only set in DEBUG mode to inject
|
||||
* CRC errors into log recovery.
|
||||
*/
|
||||
if (XFS_TEST_ERROR(bp->b_error, l->l_mp, XFS_ERRTAG_IODONE_IOERR,
|
||||
XFS_RANDOM_IODONE_IOERR) ||
|
||||
if (XFS_TEST_ERROR(bp->b_error, l->l_mp, XFS_ERRTAG_IODONE_IOERR) ||
|
||||
iclog->ic_state & XLOG_STATE_IOABORT) {
|
||||
if (iclog->ic_state & XLOG_STATE_IOABORT)
|
||||
iclog->ic_state &= ~XLOG_STATE_IOABORT;
|
||||
|
@ -1665,7 +1664,7 @@ xlog_cksum(
|
|||
char *dp,
|
||||
int size)
|
||||
{
|
||||
__uint32_t crc;
|
||||
uint32_t crc;
|
||||
|
||||
/* first generate the crc for the record header ... */
|
||||
crc = xfs_start_cksum_update((char *)rhead,
|
||||
|
@ -1828,7 +1827,7 @@ xlog_sync(
|
|||
*/
|
||||
dptr = (char *)&iclog->ic_header + count;
|
||||
for (i = 0; i < split; i += BBSIZE) {
|
||||
__uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
|
||||
uint32_t cycle = be32_to_cpu(*(__be32 *)dptr);
|
||||
if (++cycle == XLOG_HEADER_MAGIC_NUM)
|
||||
cycle++;
|
||||
*(__be32 *)dptr = cpu_to_be32(cycle);
|
||||
|
@ -1842,7 +1841,6 @@ xlog_sync(
|
|||
/* calculcate the checksum */
|
||||
iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
|
||||
iclog->ic_datap, size);
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Intentionally corrupt the log record CRC based on the error injection
|
||||
* frequency, if defined. This facilitates testing log recovery in the
|
||||
|
@ -1850,15 +1848,13 @@ xlog_sync(
|
|||
* write on I/O completion and shutdown the fs. The subsequent mount
|
||||
* detects the bad CRC and attempts to recover.
|
||||
*/
|
||||
if (log->l_badcrc_factor &&
|
||||
(prandom_u32() % log->l_badcrc_factor == 0)) {
|
||||
if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
|
||||
iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
|
||||
iclog->ic_state |= XLOG_STATE_IOABORT;
|
||||
xfs_warn(log->l_mp,
|
||||
"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
|
||||
be64_to_cpu(iclog->ic_header.h_lsn));
|
||||
}
|
||||
#endif
|
||||
|
||||
bp->b_io_length = BTOBB(count);
|
||||
bp->b_fspriv = iclog;
|
||||
|
@ -2024,7 +2020,7 @@ xlog_print_tic_res(
|
|||
};
|
||||
#undef REG_TYPE_STR
|
||||
|
||||
xfs_warn(mp, "xlog_write: reservation summary:");
|
||||
xfs_warn(mp, "ticket reservation summary:");
|
||||
xfs_warn(mp, " unit res = %d bytes",
|
||||
ticket->t_unit_res);
|
||||
xfs_warn(mp, " current res = %d bytes",
|
||||
|
@ -2045,10 +2041,55 @@ xlog_print_tic_res(
|
|||
"bad-rtype" : res_type_str[r_type]),
|
||||
ticket->t_res_arr[i].r_len);
|
||||
}
|
||||
}
|
||||
|
||||
xfs_alert_tag(mp, XFS_PTAG_LOGRES,
|
||||
"xlog_write: reservation ran out. Need to up reservation");
|
||||
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
|
||||
/*
|
||||
* Print a summary of the transaction.
|
||||
*/
|
||||
void
|
||||
xlog_print_trans(
|
||||
struct xfs_trans *tp)
|
||||
{
|
||||
struct xfs_mount *mp = tp->t_mountp;
|
||||
struct xfs_log_item_desc *lidp;
|
||||
|
||||
/* dump core transaction and ticket info */
|
||||
xfs_warn(mp, "transaction summary:");
|
||||
xfs_warn(mp, " flags = 0x%x", tp->t_flags);
|
||||
|
||||
xlog_print_tic_res(mp, tp->t_ticket);
|
||||
|
||||
/* dump each log item */
|
||||
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
|
||||
struct xfs_log_item *lip = lidp->lid_item;
|
||||
struct xfs_log_vec *lv = lip->li_lv;
|
||||
struct xfs_log_iovec *vec;
|
||||
int i;
|
||||
|
||||
xfs_warn(mp, "log item: ");
|
||||
xfs_warn(mp, " type = 0x%x", lip->li_type);
|
||||
xfs_warn(mp, " flags = 0x%x", lip->li_flags);
|
||||
if (!lv)
|
||||
continue;
|
||||
xfs_warn(mp, " niovecs = %d", lv->lv_niovecs);
|
||||
xfs_warn(mp, " size = %d", lv->lv_size);
|
||||
xfs_warn(mp, " bytes = %d", lv->lv_bytes);
|
||||
xfs_warn(mp, " buf len = %d", lv->lv_buf_len);
|
||||
|
||||
/* dump each iovec for the log item */
|
||||
vec = lv->lv_iovecp;
|
||||
for (i = 0; i < lv->lv_niovecs; i++) {
|
||||
int dumplen = min(vec->i_len, 32);
|
||||
|
||||
xfs_warn(mp, " iovec[%d]", i);
|
||||
xfs_warn(mp, " type = 0x%x", vec->i_type);
|
||||
xfs_warn(mp, " len = %d", vec->i_len);
|
||||
xfs_warn(mp, " first %d bytes of iovec[%d]:", dumplen, i);
|
||||
xfs_hex_dump(vec->i_addr, dumplen);
|
||||
|
||||
vec++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2321,8 +2362,12 @@ xlog_write(
|
|||
if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS))
|
||||
ticket->t_curr_res -= sizeof(xlog_op_header_t);
|
||||
|
||||
if (ticket->t_curr_res < 0)
|
||||
if (ticket->t_curr_res < 0) {
|
||||
xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
|
||||
"ctx ticket reservation ran out. Need to up reservation");
|
||||
xlog_print_tic_res(log->l_mp, ticket);
|
||||
xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
|
||||
}
|
||||
|
||||
index = 0;
|
||||
lv = log_vector;
|
||||
|
@ -2363,8 +2408,8 @@ xlog_write(
|
|||
}
|
||||
|
||||
reg = &vecp[index];
|
||||
ASSERT(reg->i_len % sizeof(__int32_t) == 0);
|
||||
ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
|
||||
ASSERT(reg->i_len % sizeof(int32_t) == 0);
|
||||
ASSERT((unsigned long)ptr % sizeof(int32_t) == 0);
|
||||
|
||||
start_rec_copy = xlog_write_start_rec(ptr, ticket);
|
||||
if (start_rec_copy) {
|
||||
|
@ -3143,7 +3188,7 @@ xlog_state_switch_iclogs(
|
|||
/* Round up to next log-sunit */
|
||||
if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
|
||||
log->l_mp->m_sb.sb_logsunit > 1) {
|
||||
__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
|
||||
uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
|
||||
log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
|
||||
}
|
||||
|
||||
|
@ -3771,7 +3816,7 @@ xlog_verify_iclog(
|
|||
xlog_in_core_2_t *xhdr;
|
||||
void *base_ptr, *ptr, *p;
|
||||
ptrdiff_t field_offset;
|
||||
__uint8_t clientid;
|
||||
uint8_t clientid;
|
||||
int len, i, j, k, op_len;
|
||||
int idx;
|
||||
|
||||
|
|
|
@ -159,7 +159,7 @@ int xfs_log_reserve(struct xfs_mount *mp,
|
|||
int length,
|
||||
int count,
|
||||
struct xlog_ticket **ticket,
|
||||
__uint8_t clientid,
|
||||
uint8_t clientid,
|
||||
bool permanent);
|
||||
int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
|
||||
void xfs_log_unmount(struct xfs_mount *mp);
|
||||
|
|
|
@@ -410,6 +410,7 @@ xlog_cil_insert_items(
 	int			len = 0;
 	int			diff_iovecs = 0;
 	int			iclog_space;
+	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;
 
 	ASSERT(tp);
 
@@ -419,12 +420,65 @@ xlog_cil_insert_items(
 	 */
 	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);
 
+	spin_lock(&cil->xc_cil_lock);
+
+	/* account for space used by new iovec headers  */
+	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
+	len += iovhdr_res;
+	ctx->nvecs += diff_iovecs;
+
+	/* attach the transaction to the CIL if it has any busy extents */
+	if (!list_empty(&tp->t_busy))
+		list_splice_init(&tp->t_busy, &ctx->busy_extents);
+
+	/*
+	 * Now transfer enough transaction reservation to the context ticket
+	 * for the checkpoint. The context ticket is special - the unit
+	 * reservation has to grow as well as the current reservation as we
+	 * steal from tickets so we can correctly determine the space used
+	 * during the transaction commit.
+	 */
+	if (ctx->ticket->t_curr_res == 0) {
+		ctx_res = ctx->ticket->t_unit_res;
+		ctx->ticket->t_curr_res = ctx_res;
+		tp->t_ticket->t_curr_res -= ctx_res;
+	}
+
+	/* do we need space for more log record headers? */
+	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
+	if (len > 0 && (ctx->space_used / iclog_space !=
+	    (ctx->space_used + len) / iclog_space)) {
+		split_res = (len + iclog_space - 1) / iclog_space;
+		/* need to take into account split region headers, too */
+		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
+		ctx->ticket->t_unit_res += split_res;
+		ctx->ticket->t_curr_res += split_res;
+		tp->t_ticket->t_curr_res -= split_res;
+		ASSERT(tp->t_ticket->t_curr_res >= len);
+	}
+	tp->t_ticket->t_curr_res -= len;
+	ctx->space_used += len;
+
+	/*
+	 * If we've overrun the reservation, dump the tx details before we move
+	 * the log items. Shutdown is imminent...
+	 */
+	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
+		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
+		xfs_warn(log->l_mp,
+			 "  log items: %d bytes (iov hdrs: %d bytes)",
+			 len, iovhdr_res);
+		xfs_warn(log->l_mp, "  split region headers: %d bytes",
+			 split_res);
+		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
+		xlog_print_trans(tp);
+	}
+
 	/*
 	 * Now (re-)position everything modified at the tail of the CIL.
 	 * We do this here so we only need to take the CIL lock once during
 	 * the transaction commit.
 	 */
-	spin_lock(&cil->xc_cil_lock);
 	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
 		struct xfs_log_item *lip = lidp->lid_item;
 
@@ -441,44 +495,10 @@ xlog_cil_insert_items(
 		list_move_tail(&lip->li_cil, &cil->xc_cil);
 	}
 
-	/* account for space used by new iovec headers  */
-	len += diff_iovecs * sizeof(xlog_op_header_t);
-	ctx->nvecs += diff_iovecs;
-
-	/* attach the transaction to the CIL if it has any busy extents */
-	if (!list_empty(&tp->t_busy))
-		list_splice_init(&tp->t_busy, &ctx->busy_extents);
-
-	/*
-	 * Now transfer enough transaction reservation to the context ticket
-	 * for the checkpoint. The context ticket is special - the unit
-	 * reservation has to grow as well as the current reservation as we
-	 * steal from tickets so we can correctly determine the space used
-	 * during the transaction commit.
-	 */
-	if (ctx->ticket->t_curr_res == 0) {
-		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
-		tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
-	}
-
-	/* do we need space for more log record headers? */
-	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
-	if (len > 0 && (ctx->space_used / iclog_space !=
-	    (ctx->space_used + len) / iclog_space)) {
-		int hdrs;
-
-		hdrs = (len + iclog_space - 1) / iclog_space;
-		/* need to take into account split region headers, too */
-		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
-		ctx->ticket->t_unit_res += hdrs;
-		ctx->ticket->t_curr_res += hdrs;
-		tp->t_ticket->t_curr_res -= hdrs;
-		ASSERT(tp->t_ticket->t_curr_res >= len);
-	}
-	tp->t_ticket->t_curr_res -= len;
-	ctx->space_used += len;
-
 	spin_unlock(&cil->xc_cil_lock);
-
-	if (tp->t_ticket->t_curr_res < 0)
-		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
 }
 
 static void
 
@@ -973,6 +993,7 @@ xfs_log_commit_cil(
 {
 	struct xlog		*log = mp->m_log;
 	struct xfs_cil		*cil = log->l_cilp;
+	xfs_lsn_t		xc_commit_lsn;
 
 	/*
 	 * Do all necessary memory allocation before we lock the CIL.
 
@@ -986,13 +1007,9 @@ xfs_log_commit_cil(
 
 	xlog_cil_insert_items(log, tp);
 
-	/* check we didn't blow the reservation */
-	if (tp->t_ticket->t_curr_res < 0)
-		xlog_print_tic_res(mp, tp->t_ticket);
-
-	tp->t_commit_lsn = cil->xc_ctx->sequence;
+	xc_commit_lsn = cil->xc_ctx->sequence;
 	if (commit_lsn)
-		*commit_lsn = tp->t_commit_lsn;
+		*commit_lsn = xc_commit_lsn;
 
 	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
 	xfs_trans_unreserve_and_mod_sb(tp);
 
@@ -1008,7 +1025,7 @@ xfs_log_commit_cil(
 	 * the log items. This affects (at least) processing of stale buffers,
 	 * inodes and EFIs.
 	 */
-	xfs_trans_free_items(tp, tp->t_commit_lsn, false);
+	xfs_trans_free_items(tp, xc_commit_lsn, false);
 
 	xlog_cil_push_background(log);

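The hunks above move the CIL space accounting ahead of the item re-queueing and record what was stolen from the transaction ticket in iovhdr_res, split_res and ctx_res so that the overrun warning can itemise each component. As a reading aid, here is a minimal user-space sketch of the split-region header arithmetic only; this is not kernel code, and the iclog size, header size and op-header size below are made-up example values:

	/* sketch of the split_res computation with hypothetical numbers */
	#include <stdio.h>

	int main(void)
	{
		int iclog_size = 32768, iclog_hsize = 512, op_hdr = 12;
		int iclog_space = iclog_size - iclog_hsize;
		int space_used = 30000, len = 5000;	/* hypothetical checkpoint state */

		/* does this checkpoint now cross an iclog boundary? */
		if (len > 0 && (space_used / iclog_space !=
				(space_used + len) / iclog_space)) {
			int split_res = (len + iclog_space - 1) / iclog_space;
			/* one extra record header plus op header per split */
			split_res *= iclog_hsize + op_hdr;
			printf("extra header reservation: %d bytes\n", split_res);
		}
		return 0;
	}

With the example numbers above the checkpoint crosses one iclog boundary, so one extra record header's worth of space is stolen from the committing transaction's ticket.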
@@ -419,7 +419,7 @@ struct xlog {
 };
 
 #define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
-	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))
 
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
@@ -456,6 +456,7 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
 }
 
 void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
+void	xlog_print_trans(struct xfs_trans *);
 int
 xlog_write(
 	struct xlog		*log,

@@ -2230,9 +2230,9 @@ xlog_recover_get_buf_lsn(
 	struct xfs_mount	*mp,
 	struct xfs_buf		*bp)
 {
-	__uint32_t		magic32;
-	__uint16_t		magic16;
-	__uint16_t		magicda;
+	uint32_t		magic32;
+	uint16_t		magic16;
+	uint16_t		magicda;
 	void			*blk = bp->b_addr;
 	uuid_t			*uuid;
 	xfs_lsn_t		lsn = -1;
 
@@ -2381,9 +2381,9 @@ xlog_recover_validate_buf_type(
 	xfs_lsn_t		current_lsn)
 {
 	struct xfs_da_blkinfo	*info = bp->b_addr;
-	__uint32_t		magic32;
-	__uint16_t		magic16;
-	__uint16_t		magicda;
+	uint32_t		magic32;
+	uint16_t		magic16;
+	uint16_t		magicda;
 	char			*warnmsg = NULL;
 
 	/*
 
@@ -2852,7 +2852,7 @@ xlog_recover_buffer_pass2(
 	if (XFS_DINODE_MAGIC ==
 	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
 	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
-			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
+			(uint32_t)log->l_mp->m_inode_cluster_size))) {
 		xfs_buf_stale(bp);
 		error = xfs_bwrite(bp);
 	} else {
 
@@ -3423,7 +3423,7 @@ xlog_recover_efd_pass2(
 	xfs_efd_log_format_t	*efd_formatp;
 	xfs_efi_log_item_t	*efip = NULL;
 	xfs_log_item_t		*lip;
-	__uint64_t		efi_id;
+	uint64_t		efi_id;
 	struct xfs_ail_cursor	cur;
 	struct xfs_ail		*ailp = log->l_ailp;
 
@@ -3519,7 +3519,7 @@ xlog_recover_rud_pass2(
 	struct xfs_rud_log_format	*rud_formatp;
 	struct xfs_rui_log_item		*ruip = NULL;
 	struct xfs_log_item		*lip;
-	__uint64_t			rui_id;
+	uint64_t			rui_id;
 	struct xfs_ail_cursor		cur;
 	struct xfs_ail			*ailp = log->l_ailp;
 
@@ -3635,7 +3635,7 @@ xlog_recover_cud_pass2(
 	struct xfs_cud_log_format	*cud_formatp;
 	struct xfs_cui_log_item		*cuip = NULL;
 	struct xfs_log_item		*lip;
-	__uint64_t			cui_id;
+	uint64_t			cui_id;
 	struct xfs_ail_cursor		cur;
 	struct xfs_ail			*ailp = log->l_ailp;
 
@@ -3754,7 +3754,7 @@ xlog_recover_bud_pass2(
 	struct xfs_bud_log_format	*bud_formatp;
 	struct xfs_bui_log_item		*buip = NULL;
 	struct xfs_log_item		*lip;
-	__uint64_t			bui_id;
+	uint64_t			bui_id;
 	struct xfs_ail_cursor		cur;
 	struct xfs_ail			*ailp = log->l_ailp;
 
@@ -4152,7 +4152,7 @@ xlog_recover_commit_trans(
 
 #define XLOG_RECOVER_COMMIT_QUEUE_MAX	100
 
-	hlist_del(&trans->r_list);
+	hlist_del_init(&trans->r_list);
 
 	error = xlog_recover_reorder_trans(log, trans, pass);
 	if (error)
 
@@ -4354,6 +4354,8 @@ xlog_recover_free_trans(
 	xlog_recover_item_t	*item, *n;
 	int			i;
 
+	hlist_del_init(&trans->r_list);
+
 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
 		/* Free the regions in the item. */
 		list_del(&item->ri_list);
 
@@ -5224,12 +5226,16 @@ xlog_do_recovery_pass(
 	int			error2 = 0;
 	int			bblks, split_bblks;
 	int			hblks, split_hblks, wrapped_hblks;
+	int			i;
 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
 	LIST_HEAD		(buffer_list);
 
 	ASSERT(head_blk != tail_blk);
 	rhead_blk = 0;
 
+	for (i = 0; i < XLOG_RHASH_SIZE; i++)
+		INIT_HLIST_HEAD(&rhash[i]);
+
 	/*
 	 * Read the header of the tail block and get the iclog buffer size from
 	 * h_size.  Use this to tell how many sectors make up the log header.
 
@@ -5466,6 +5472,19 @@ xlog_do_recovery_pass(
 	if (error && first_bad)
 		*first_bad = rhead_blk;
 
+	/*
+	 * Transactions are freed at commit time but transactions without commit
+	 * records on disk are never committed. Free any that may be left in the
+	 * hash table.
+	 */
+	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
+		struct hlist_node	*tmp;
+		struct xlog_recover	*trans;
+
+		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
+			xlog_recover_free_trans(trans);
+	}
+
 	return error ? error : error2;
 }
 
@@ -5772,9 +5791,9 @@ xlog_recover_check_summary(
 	xfs_buf_t	*agfbp;
 	xfs_buf_t	*agibp;
 	xfs_agnumber_t	agno;
-	__uint64_t	freeblks;
-	__uint64_t	itotal;
-	__uint64_t	ifree;
+	uint64_t	freeblks;
+	uint64_t	itotal;
+	uint64_t	ifree;
 	int		error;
 
 	mp = log->l_mp;

@@ -110,7 +110,10 @@ assfail(char *expr, char *file, int line)
 {
 	xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d",
 		expr, file, line);
-	BUG();
+	if (xfs_globals.bug_on_assert)
+		BUG();
+	else
+		WARN_ON(1);
 }
 
 void

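With assfail() no longer calling BUG() unconditionally, whether a failed XFS ASSERT crashes the machine or only emits a warning and stack trace is now decided at run time by xfs_globals.bug_on_assert. For orientation only, an ASSERT-style macro typically hands off to assfail() roughly like the sketch below; MY_ASSERT is a made-up name, and the real macro lives in the XFS headers and may differ in detail:

	/* illustrative sketch, not the kernel's actual ASSERT definition */
	#define MY_ASSERT(expr) \
		(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))

The point of the change is that the same assertion sites can be made fatal on a debug rig and non-fatal on a production machine without rebuilding.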
@@ -173,7 +173,7 @@ xfs_free_perag(
 int
 xfs_sb_validate_fsb_count(
 	xfs_sb_t	*sbp,
-	__uint64_t	nblocks)
+	uint64_t	nblocks)
 {
 	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
 	ASSERT(sbp->sb_blocklog >= BBSHIFT);
 
@@ -435,7 +435,7 @@ STATIC void
 xfs_set_maxicount(xfs_mount_t *mp)
 {
 	xfs_sb_t	*sbp = &(mp->m_sb);
-	__uint64_t	icount;
+	uint64_t	icount;
 
 	if (sbp->sb_imax_pct) {
 		/*
 
@@ -501,7 +501,7 @@ xfs_set_low_space_thresholds(
 	int i;
 
 	for (i = 0; i < XFS_LOWSP_MAX; i++) {
-		__uint64_t space = mp->m_sb.sb_dblocks;
+		uint64_t space = mp->m_sb.sb_dblocks;
 
 		do_div(space, 100);
 		mp->m_low_space[i] = space * (i + 1);
 
@@ -597,10 +597,10 @@ xfs_mount_reset_sbqflags(
 	return xfs_sync_sb(mp, false);
 }
 
-__uint64_t
+uint64_t
 xfs_default_resblks(xfs_mount_t *mp)
 {
-	__uint64_t resblks;
+	uint64_t resblks;
 
 	/*
 	 * We default to 5% or 8192 fsbs of space reserved, whichever is
 
@@ -611,7 +611,7 @@ xfs_default_resblks(xfs_mount_t *mp)
 	 */
 	resblks = mp->m_sb.sb_dblocks;
 	do_div(resblks, 20);
-	resblks = min_t(__uint64_t, resblks, 8192);
+	resblks = min_t(uint64_t, resblks, 8192);
 	return resblks;
 }
 
@@ -631,7 +631,7 @@ xfs_mountfs(
 {
 	struct xfs_sb		*sbp = &(mp->m_sb);
 	struct xfs_inode	*rip;
-	__uint64_t		resblks;
+	uint64_t		resblks;
 	uint			quotamount = 0;
 	uint			quotaflags = 0;
 	int			error = 0;
 
@@ -719,10 +719,13 @@ xfs_mountfs(
 	if (error)
 		goto out_del_stats;
 
+	error = xfs_errortag_init(mp);
+	if (error)
+		goto out_remove_error_sysfs;
+
 	error = xfs_uuid_mount(mp);
 	if (error)
-		goto out_remove_error_sysfs;
+		goto out_remove_errortag;
 
 	/*
 	 * Set the minimum read and write sizes
 
@@ -1044,6 +1047,8 @@ xfs_mountfs(
 	xfs_da_unmount(mp);
  out_remove_uuid:
 	xfs_uuid_unmount(mp);
+ out_remove_errortag:
+	xfs_errortag_del(mp);
  out_remove_error_sysfs:
 	xfs_error_sysfs_del(mp);
  out_del_stats:
 
@@ -1062,7 +1067,7 @@ void
 xfs_unmountfs(
 	struct xfs_mount	*mp)
 {
-	__uint64_t		resblks;
+	uint64_t		resblks;
 	int			error;
 
 	cancel_delayed_work_sync(&mp->m_eofblocks_work);
 
@@ -1147,10 +1152,11 @@ xfs_unmountfs(
 	xfs_uuid_unmount(mp);
 
 #if defined(DEBUG)
-	xfs_errortag_clearall(mp, 0);
+	xfs_errortag_clearall(mp);
 #endif
 	xfs_free_perag(mp);
 
+	xfs_errortag_del(mp);
 	xfs_error_sysfs_del(mp);
 	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 	xfs_sysfs_del(&mp->m_kobj);

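The default reserve pool sized by xfs_default_resblks() above is 5% of the data blocks (sb_dblocks divided by 20), capped at 8192 filesystem blocks. A quick worked example in plain user-space C, using an invented geometry rather than any real filesystem:

	/* standalone sketch of the xfs_default_resblks() arithmetic */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dblocks = 2621440;		/* example: ~10 GiB of 4k blocks */
		uint64_t resblks = dblocks / 20;	/* 5% -> 131072 blocks */

		if (resblks > 8192)
			resblks = 8192;			/* capped at 8192 fsbs */
		printf("default reserve pool: %llu blocks\n",
		       (unsigned long long)resblks);
		return 0;
	}

For anything beyond a small filesystem the 8192-block cap dominates, so the reserve pool stays bounded as the filesystem grows.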
@@ -108,10 +108,10 @@ typedef struct xfs_mount {
 	xfs_buftarg_t		*m_ddev_targp;	/* saves taking the address */
 	xfs_buftarg_t		*m_logdev_targp;/* ptr to log device */
 	xfs_buftarg_t		*m_rtdev_targp;	/* ptr to rt device */
-	__uint8_t		m_blkbit_log;	/* blocklog + NBBY */
-	__uint8_t		m_blkbb_log;	/* blocklog - BBSHIFT */
-	__uint8_t		m_agno_log;	/* log #ag's */
-	__uint8_t		m_agino_log;	/* #bits for agino in inum */
+	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
+	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
+	uint8_t			m_agno_log;	/* log #ag's */
+	uint8_t			m_agino_log;	/* #bits for agino in inum */
 	uint			m_inode_cluster_size;/* min inode buf size */
 	uint			m_blockmask;	/* sb_blocksize-1 */
 	uint			m_blockwsize;	/* sb_blocksize in words */
 
@@ -139,7 +139,7 @@ typedef struct xfs_mount {
 	struct mutex		m_growlock;	/* growfs mutex */
 	int			m_fixedfsid[2];	/* unchanged for life of FS */
 	uint			m_dmevmask;	/* DMI events for this FS */
-	__uint64_t		m_flags;	/* global mount flags */
+	uint64_t		m_flags;	/* global mount flags */
 	bool			m_inotbt_nores; /* no per-AG finobt resv. */
 	int			m_ialloc_inos;	/* inodes in inode allocation */
 	int			m_ialloc_blks;	/* blocks in inode allocation */
 
@@ -148,14 +148,14 @@ typedef struct xfs_mount {
 	int			m_inoalign_mask;/* mask sb_inoalignmt if used */
 	uint			m_qflags;	/* quota status flags */
 	struct xfs_trans_resv	m_resv;		/* precomputed res values */
-	__uint64_t		m_maxicount;	/* maximum inode count */
-	__uint64_t		m_resblks;	/* total reserved blocks */
-	__uint64_t		m_resblks_avail;/* available reserved blocks */
-	__uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
+	uint64_t		m_maxicount;	/* maximum inode count */
+	uint64_t		m_resblks;	/* total reserved blocks */
+	uint64_t		m_resblks_avail;/* available reserved blocks */
+	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
 	int			m_dalign;	/* stripe unit */
 	int			m_swidth;	/* stripe width */
 	int			m_sinoalign;	/* stripe unit inode alignment */
-	__uint8_t		m_sectbb_log;	/* sectlog - BBSHIFT */
+	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
 	const struct xfs_nameops *m_dirnameops;	/* vector of dir name ops */
 	const struct xfs_dir_ops *m_dir_inode_ops; /* vector of dir inode ops */
 	const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
 
@@ -194,19 +194,17 @@ typedef struct xfs_mount {
 	 * ever support shrinks it would have to be persisted in addition
 	 * to various other kinds of pain inflicted on the pNFS server.
 	 */
-	__uint32_t		m_generation;
+	uint32_t		m_generation;
 
 	bool			m_fail_unmount;
 #ifdef DEBUG
 	/*
-	 * DEBUG mode instrumentation to test and/or trigger delayed allocation
-	 * block killing in the event of failed writes. When enabled, all
-	 * buffered writes are silenty dropped and handled as if they failed.
-	 * All delalloc blocks in the range of the write (including pre-existing
-	 * delalloc blocks!) are tossed as part of the write failure error
-	 * handling sequence.
+	 * Frequency with which errors are injected.  Replaces xfs_etest; the
+	 * value stored in here is the inverse of the frequency with which the
+	 * error triggers.  1 = always, 2 = half the time, etc.
 	 */
-	bool			m_drop_writes;
+	unsigned int		*m_errortag;
+	struct xfs_kobj		m_errortag_kobj;
 #endif
 } xfs_mount_t;
 
@@ -325,20 +323,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
 	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
 }
 
-#ifdef DEBUG
-static inline bool
-xfs_mp_drop_writes(struct xfs_mount *mp)
-{
-	return mp->m_drop_writes;
-}
-#else
-static inline bool
-xfs_mp_drop_writes(struct xfs_mount *mp)
-{
-	return 0;
-}
-#endif
-
 /* per-AG block reservation data structures*/
 enum xfs_ag_resv_type {
 	XFS_AG_RESV_NONE = 0,
 
@@ -367,12 +351,12 @@ typedef struct xfs_perag {
 	char		pagi_init;	/* this agi's entry is initialized */
 	char		pagf_metadata;	/* the agf is preferred to be metadata */
 	char		pagi_inodeok;	/* The agi is ok for inodes */
-	__uint8_t	pagf_levels[XFS_BTNUM_AGF];
+	uint8_t		pagf_levels[XFS_BTNUM_AGF];
 					/* # of levels in bno & cnt btree */
-	__uint32_t	pagf_flcount;	/* count of blocks in freelist */
+	uint32_t	pagf_flcount;	/* count of blocks in freelist */
 	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
 	xfs_extlen_t	pagf_longest;	/* longest free space */
-	__uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
+	uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
 	xfs_agino_t	pagi_freecount;	/* number of free inodes */
 	xfs_agino_t	pagi_count;	/* number of allocated inodes */
 
@@ -411,7 +395,7 @@ typedef struct xfs_perag {
 	struct xfs_ag_resv	pag_agfl_resv;
 
 	/* reference count */
-	__uint8_t		pagf_refcount_level;
+	uint8_t			pagf_refcount_level;
 } xfs_perag_t;
 
 static inline struct xfs_ag_resv *
 
@@ -434,7 +418,7 @@ void xfs_buf_hash_destroy(xfs_perag_t *pag);
 
 extern void	xfs_uuid_table_free(void);
 extern int	xfs_log_sbcount(xfs_mount_t *);
-extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
+extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
 extern int	xfs_mountfs(xfs_mount_t *mp);
 extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
 				     xfs_agnumber_t *maxagi);
 
@@ -450,7 +434,7 @@ extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
 extern int	xfs_readsb(xfs_mount_t *, int);
 extern void	xfs_freesb(xfs_mount_t *);
 extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
-extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
+extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);
 
 extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

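The new per-mount m_errortag array holds one injection frequency per error tag, with the inverse-frequency semantics noted in the comment (1 = always, 2 = half the time, and so on). Purely as an illustration of those semantics, and not as the kernel's implementation (xfs_errortag_test() is the authoritative code and uses the kernel's own RNG and reporting), a user-space sketch of evaluating such a knob might look like this:

	#include <stdbool.h>
	#include <stdlib.h>

	/* illustrative only: fire an injected error with probability 1/freq;
	 * freq == 0 means the tag is disabled, freq == 1 fires every time. */
	static bool errortag_should_fire(unsigned int freq)
	{
		if (freq == 0)
			return false;
		return (rand() % freq) == 0;
	}

Because the table now hangs off the mount and has a kobject (m_errortag_kobj), the knobs can be exposed per filesystem through sysfs rather than through the old global xfs_etest state.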
@@ -1247,6 +1247,7 @@ xfs_qm_flush_one(
 	struct xfs_dquot	*dqp,
 	void			*data)
 {
+	struct xfs_mount	*mp = dqp->q_mount;
 	struct list_head	*buffer_list = data;
 	struct xfs_buf		*bp = NULL;
 	int			error = 0;
 
@@ -1257,7 +1258,32 @@ xfs_qm_flush_one(
 	if (!XFS_DQ_IS_DIRTY(dqp))
 		goto out_unlock;
 
-	xfs_dqflock(dqp);
+	/*
+	 * The only way the dquot is already flush locked by the time quotacheck
+	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
+	 * it for the final time. Quotacheck collects all dquot bufs in the
+	 * local delwri queue before dquots are dirtied, so reclaim can't have
+	 * possibly queued it for I/O. The only way out is to push the buffer to
+	 * cycle the flush lock.
+	 */
+	if (!xfs_dqflock_nowait(dqp)) {
+		/* buf is pinned in-core by delwri list */
+		DEFINE_SINGLE_BUF_MAP(map, dqp->q_blkno,
+				      mp->m_quotainfo->qi_dqchunklen);
+		bp = _xfs_buf_find(mp->m_ddev_targp, &map, 1, 0, NULL);
+		if (!bp) {
+			error = -EINVAL;
+			goto out_unlock;
+		}
+		xfs_buf_unlock(bp);
+
+		xfs_buf_delwri_pushbuf(bp, buffer_list);
+		xfs_buf_rele(bp);
+
+		error = -EAGAIN;
+		goto out_unlock;
+	}
+
 	error = xfs_qm_dqflush(dqp, &bp);
 	if (error)
 		goto out_unlock;

@@ -33,7 +33,7 @@ xfs_fill_statvfs_from_dquot(
 	struct kstatfs		*statp,
 	struct xfs_dquot	*dqp)
 {
-	__uint64_t		limit;
+	uint64_t		limit;
 
 	limit = dqp->q_core.d_blk_softlimit ?
 		be64_to_cpu(dqp->q_core.d_blk_softlimit) :

@@ -269,7 +269,6 @@ xfs_fs_get_nextdqblk(
 	/* ID may be different, so convert back what we got */
 	*qid = make_kqid(current_user_ns(), qid->type, id);
 	return 0;
-
 }
 
 STATIC int

@@ -155,6 +155,7 @@
 int
 xfs_reflink_find_shared(
 	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
 	xfs_agnumber_t		agno,
 	xfs_agblock_t		agbno,
 	xfs_extlen_t		aglen,
 
@@ -166,18 +167,18 @@ xfs_reflink_find_shared(
 	struct xfs_btree_cur	*cur;
 	int			error;
 
-	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 	if (error)
 		return error;
 
-	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
 	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
 			find_end_of_shared);
 
 	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 
-	xfs_buf_relse(agbp);
+	xfs_trans_brelse(tp, agbp);
 	return error;
 }
 
@@ -217,7 +218,7 @@ xfs_reflink_trim_around_shared(
 	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
 	aglen = irec->br_blockcount;
 
-	error = xfs_reflink_find_shared(ip->i_mount, agno, agbno,
+	error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
 			aglen, &fbno, &flen, true);
 	if (error)
 		return error;
 
@@ -1373,8 +1374,8 @@ xfs_reflink_dirty_extents(
 		agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
 		aglen = map[1].br_blockcount;
 
-		error = xfs_reflink_find_shared(mp, agno, agbno, aglen,
-				&rbno, &rlen, true);
+		error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
+				aglen, &rbno, &rlen, true);
 		if (error)
 			goto out;
 		if (rbno == NULLAGBLOCK)
 
@@ -1405,56 +1406,72 @@ xfs_reflink_dirty_extents(
 	return error;
 }
 
+/* Does this inode need the reflink flag? */
+int
+xfs_reflink_inode_has_shared_extents(
+	struct xfs_trans		*tp,
+	struct xfs_inode		*ip,
+	bool				*has_shared)
+{
+	struct xfs_bmbt_irec		got;
+	struct xfs_mount		*mp = ip->i_mount;
+	struct xfs_ifork		*ifp;
+	xfs_agnumber_t			agno;
+	xfs_agblock_t			agbno;
+	xfs_extlen_t			aglen;
+	xfs_agblock_t			rbno;
+	xfs_extlen_t			rlen;
+	xfs_extnum_t			idx;
+	bool				found;
+	int				error;
+
+	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
+		if (error)
+			return error;
+	}
+
+	*has_shared = false;
+	found = xfs_iext_lookup_extent(ip, ifp, 0, &idx, &got);
+	while (found) {
+		if (isnullstartblock(got.br_startblock) ||
+		    got.br_state != XFS_EXT_NORM)
+			goto next;
+		agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
+		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
+		aglen = got.br_blockcount;
+
+		error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
+				&rbno, &rlen, false);
+		if (error)
+			return error;
+		/* Is there still a shared block here? */
+		if (rbno != NULLAGBLOCK) {
+			*has_shared = true;
+			return 0;
+		}
+next:
+		found = xfs_iext_get_extent(ifp, ++idx, &got);
+	}
+
+	return 0;
+}
+
 /* Clear the inode reflink flag if there are no shared extents. */
 int
 xfs_reflink_clear_inode_flag(
 	struct xfs_inode	*ip,
 	struct xfs_trans	**tpp)
 {
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		fbno;
-	xfs_filblks_t		end;
-	xfs_agnumber_t		agno;
-	xfs_agblock_t		agbno;
-	xfs_extlen_t		aglen;
-	xfs_agblock_t		rbno;
-	xfs_extlen_t		rlen;
-	struct xfs_bmbt_irec	map;
-	int			nmaps;
+	bool			needs_flag;
 	int			error = 0;
 
 	ASSERT(xfs_is_reflink_inode(ip));
 
-	fbno = 0;
-	end = XFS_B_TO_FSB(mp, i_size_read(VFS_I(ip)));
-	while (end - fbno > 0) {
-		nmaps = 1;
-		/*
-		 * Look for extents in the file.  Skip holes, delalloc, or
-		 * unwritten extents; they can't be reflinked.
-		 */
-		error = xfs_bmapi_read(ip, fbno, end - fbno, &map, &nmaps, 0);
-		if (error)
-			return error;
-		if (nmaps == 0)
-			break;
-		if (!xfs_bmap_is_real_extent(&map))
-			goto next;
-
-		agno = XFS_FSB_TO_AGNO(mp, map.br_startblock);
-		agbno = XFS_FSB_TO_AGBNO(mp, map.br_startblock);
-		aglen = map.br_blockcount;
-
-		error = xfs_reflink_find_shared(mp, agno, agbno, aglen,
-				&rbno, &rlen, false);
-		if (error)
-			return error;
-		/* Is there still a shared block here? */
-		if (rbno != NULLAGBLOCK)
-			return 0;
-next:
-		fbno = map.br_startoff + map.br_blockcount;
-	}
+	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
+	if (error || needs_flag)
+		return error;
 
 	/*
 	 * We didn't find any shared blocks so turn off the reflink flag.

@@ -20,9 +20,9 @@
 #ifndef __XFS_REFLINK_H
 #define __XFS_REFLINK_H 1
 
-extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
-		xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
-		xfs_extlen_t *flen, bool find_maximal);
+extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
+		xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
+		xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
 		struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
 
@@ -47,6 +47,8 @@ extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
 extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
 		struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
+extern int xfs_reflink_inode_has_shared_extents(struct xfs_trans *tp,
+		struct xfs_inode *ip, bool *has_shared);
 extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
 		struct xfs_trans **tpp);
 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,

@@ -1256,13 +1256,13 @@ xfs_rtpick_extent(
 {
 	xfs_rtblock_t	b;		/* result block */
 	int		log2;		/* log of sequence number */
-	__uint64_t	resid;		/* residual after log removed */
-	__uint64_t	seq;		/* sequence number of file creation */
-	__uint64_t	*seqp;		/* pointer to seqno in inode */
+	uint64_t	resid;		/* residual after log removed */
+	uint64_t	seq;		/* sequence number of file creation */
+	uint64_t	*seqp;		/* pointer to seqno in inode */
 
 	ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
 
-	seqp = (__uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
+	seqp = (uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
 	if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
 		mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
 		*seqp = 0;

@@ -107,6 +107,8 @@ xfs_growfs_rt(
 /*
  * From xfs_rtbitmap.c
  */
+int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
+		  xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
 int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
 		      xfs_rtblock_t start, xfs_extlen_t len, int val,
 		      xfs_rtblock_t *new, int *stat);
 
@@ -143,6 +145,7 @@ int xfs_rtalloc_query_all(struct xfs_trans *tp,
 # define xfs_growfs_rt(mp,in)                           (ENOSYS)
 # define xfs_rtalloc_query_range(t,l,h,f,p)             (ENOSYS)
 # define xfs_rtalloc_query_all(t,f,p)                   (ENOSYS)
+# define xfs_rtbuf_get(m,t,b,i,p)                       (ENOSYS)
 static inline int		/* error */
 xfs_rtmount_init(
 	xfs_mount_t	*mp)	/* file system mount structure */

@@ -33,9 +33,9 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
 {
 	int		i, j;
 	int		len = 0;
-	__uint64_t	xs_xstrat_bytes = 0;
-	__uint64_t	xs_write_bytes = 0;
-	__uint64_t	xs_read_bytes = 0;
+	uint64_t	xs_xstrat_bytes = 0;
+	uint64_t	xs_write_bytes = 0;
+	uint64_t	xs_read_bytes = 0;
 
 	static const struct xstats_entry {
 		char	*desc;
 
@@ -100,7 +100,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
 void xfs_stats_clearall(struct xfsstats __percpu *stats)
 {
 	int		c;
-	__uint32_t	vn_active;
+	uint32_t	vn_active;
 
 	xfs_notice(NULL, "Clearing xfsstats");
 	for_each_possible_cpu(c) {

@@ -54,125 +54,125 @@ enum {
  */
 struct __xfsstats {
 # define XFSSTAT_END_EXTENT_ALLOC	4
-	__uint32_t		xs_allocx;
-	__uint32_t		xs_allocb;
-	__uint32_t		xs_freex;
-	__uint32_t		xs_freeb;
+	uint32_t		xs_allocx;
+	uint32_t		xs_allocb;
+	uint32_t		xs_freex;
+	uint32_t		xs_freeb;
 # define XFSSTAT_END_ALLOC_BTREE	(XFSSTAT_END_EXTENT_ALLOC+4)
-	__uint32_t		xs_abt_lookup;
-	__uint32_t		xs_abt_compare;
-	__uint32_t		xs_abt_insrec;
-	__uint32_t		xs_abt_delrec;
+	uint32_t		xs_abt_lookup;
+	uint32_t		xs_abt_compare;
+	uint32_t		xs_abt_insrec;
+	uint32_t		xs_abt_delrec;
 # define XFSSTAT_END_BLOCK_MAPPING	(XFSSTAT_END_ALLOC_BTREE+7)
-	__uint32_t		xs_blk_mapr;
-	__uint32_t		xs_blk_mapw;
-	__uint32_t		xs_blk_unmap;
-	__uint32_t		xs_add_exlist;
-	__uint32_t		xs_del_exlist;
-	__uint32_t		xs_look_exlist;
-	__uint32_t		xs_cmp_exlist;
+	uint32_t		xs_blk_mapr;
+	uint32_t		xs_blk_mapw;
+	uint32_t		xs_blk_unmap;
+	uint32_t		xs_add_exlist;
+	uint32_t		xs_del_exlist;
+	uint32_t		xs_look_exlist;
+	uint32_t		xs_cmp_exlist;
 # define XFSSTAT_END_BLOCK_MAP_BTREE	(XFSSTAT_END_BLOCK_MAPPING+4)
-	__uint32_t		xs_bmbt_lookup;
-	__uint32_t		xs_bmbt_compare;
-	__uint32_t		xs_bmbt_insrec;
-	__uint32_t		xs_bmbt_delrec;
+	uint32_t		xs_bmbt_lookup;
+	uint32_t		xs_bmbt_compare;
+	uint32_t		xs_bmbt_insrec;
+	uint32_t		xs_bmbt_delrec;
 # define XFSSTAT_END_DIRECTORY_OPS	(XFSSTAT_END_BLOCK_MAP_BTREE+4)
-	__uint32_t		xs_dir_lookup;
-	__uint32_t		xs_dir_create;
-	__uint32_t		xs_dir_remove;
-	__uint32_t		xs_dir_getdents;
+	uint32_t		xs_dir_lookup;
+	uint32_t		xs_dir_create;
+	uint32_t		xs_dir_remove;
+	uint32_t		xs_dir_getdents;
 # define XFSSTAT_END_TRANSACTIONS	(XFSSTAT_END_DIRECTORY_OPS+3)
-	__uint32_t		xs_trans_sync;
-	__uint32_t		xs_trans_async;
-	__uint32_t		xs_trans_empty;
+	uint32_t		xs_trans_sync;
+	uint32_t		xs_trans_async;
+	uint32_t		xs_trans_empty;
 # define XFSSTAT_END_INODE_OPS		(XFSSTAT_END_TRANSACTIONS+7)
-	__uint32_t		xs_ig_attempts;
-	__uint32_t		xs_ig_found;
-	__uint32_t		xs_ig_frecycle;
-	__uint32_t		xs_ig_missed;
-	__uint32_t		xs_ig_dup;
-	__uint32_t		xs_ig_reclaims;
-	__uint32_t		xs_ig_attrchg;
+	uint32_t		xs_ig_attempts;
+	uint32_t		xs_ig_found;
+	uint32_t		xs_ig_frecycle;
+	uint32_t		xs_ig_missed;
+	uint32_t		xs_ig_dup;
+	uint32_t		xs_ig_reclaims;
+	uint32_t		xs_ig_attrchg;
 # define XFSSTAT_END_LOG_OPS		(XFSSTAT_END_INODE_OPS+5)
-	__uint32_t		xs_log_writes;
-	__uint32_t		xs_log_blocks;
-	__uint32_t		xs_log_noiclogs;
-	__uint32_t		xs_log_force;
-	__uint32_t		xs_log_force_sleep;
+	uint32_t		xs_log_writes;
+	uint32_t		xs_log_blocks;
+	uint32_t		xs_log_noiclogs;
+	uint32_t		xs_log_force;
+	uint32_t		xs_log_force_sleep;
 # define XFSSTAT_END_TAIL_PUSHING	(XFSSTAT_END_LOG_OPS+10)
-	__uint32_t		xs_try_logspace;
-	__uint32_t		xs_sleep_logspace;
-	__uint32_t		xs_push_ail;
-	__uint32_t		xs_push_ail_success;
-	__uint32_t		xs_push_ail_pushbuf;
-	__uint32_t		xs_push_ail_pinned;
-	__uint32_t		xs_push_ail_locked;
-	__uint32_t		xs_push_ail_flushing;
-	__uint32_t		xs_push_ail_restarts;
-	__uint32_t		xs_push_ail_flush;
+	uint32_t		xs_try_logspace;
+	uint32_t		xs_sleep_logspace;
+	uint32_t		xs_push_ail;
+	uint32_t		xs_push_ail_success;
+	uint32_t		xs_push_ail_pushbuf;
+	uint32_t		xs_push_ail_pinned;
+	uint32_t		xs_push_ail_locked;
+	uint32_t		xs_push_ail_flushing;
+	uint32_t		xs_push_ail_restarts;
+	uint32_t		xs_push_ail_flush;
 # define XFSSTAT_END_WRITE_CONVERT	(XFSSTAT_END_TAIL_PUSHING+2)
-	__uint32_t		xs_xstrat_quick;
-	__uint32_t		xs_xstrat_split;
+	uint32_t		xs_xstrat_quick;
+	uint32_t		xs_xstrat_split;
 # define XFSSTAT_END_READ_WRITE_OPS	(XFSSTAT_END_WRITE_CONVERT+2)
-	__uint32_t		xs_write_calls;
-	__uint32_t		xs_read_calls;
+	uint32_t		xs_write_calls;
+	uint32_t		xs_read_calls;
 # define XFSSTAT_END_ATTRIBUTE_OPS	(XFSSTAT_END_READ_WRITE_OPS+4)
-	__uint32_t		xs_attr_get;
-	__uint32_t		xs_attr_set;
-	__uint32_t		xs_attr_remove;
-	__uint32_t		xs_attr_list;
+	uint32_t		xs_attr_get;
+	uint32_t		xs_attr_set;
+	uint32_t		xs_attr_remove;
+	uint32_t		xs_attr_list;
 # define XFSSTAT_END_INODE_CLUSTER	(XFSSTAT_END_ATTRIBUTE_OPS+3)
-	__uint32_t		xs_iflush_count;
-	__uint32_t		xs_icluster_flushcnt;
-	__uint32_t		xs_icluster_flushinode;
+	uint32_t		xs_iflush_count;
+	uint32_t		xs_icluster_flushcnt;
+	uint32_t		xs_icluster_flushinode;
 # define XFSSTAT_END_VNODE_OPS		(XFSSTAT_END_INODE_CLUSTER+8)
-	__uint32_t		vn_active;	/* # vnodes not on free lists */
-	__uint32_t		vn_alloc;	/* # times vn_alloc called */
-	__uint32_t		vn_get;		/* # times vn_get called */
-	__uint32_t		vn_hold;	/* # times vn_hold called */
-	__uint32_t		vn_rele;	/* # times vn_rele called */
-	__uint32_t		vn_reclaim;	/* # times vn_reclaim called */
-	__uint32_t		vn_remove;	/* # times vn_remove called */
-	__uint32_t		vn_free;	/* # times vn_free called */
+	uint32_t		vn_active;	/* # vnodes not on free lists */
+	uint32_t		vn_alloc;	/* # times vn_alloc called */
+	uint32_t		vn_get;		/* # times vn_get called */
+	uint32_t		vn_hold;	/* # times vn_hold called */
+	uint32_t		vn_rele;	/* # times vn_rele called */
+	uint32_t		vn_reclaim;	/* # times vn_reclaim called */
+	uint32_t		vn_remove;	/* # times vn_remove called */
+	uint32_t		vn_free;	/* # times vn_free called */
 #define XFSSTAT_END_BUF			(XFSSTAT_END_VNODE_OPS+9)
-	__uint32_t		xb_get;
-	__uint32_t		xb_create;
-	__uint32_t		xb_get_locked;
-	__uint32_t		xb_get_locked_waited;
-	__uint32_t		xb_busy_locked;
-	__uint32_t		xb_miss_locked;
-	__uint32_t		xb_page_retries;
-	__uint32_t		xb_page_found;
-	__uint32_t		xb_get_read;
+	uint32_t		xb_get;
+	uint32_t		xb_create;
+	uint32_t		xb_get_locked;
+	uint32_t		xb_get_locked_waited;
+	uint32_t		xb_busy_locked;
+	uint32_t		xb_miss_locked;
+	uint32_t		xb_page_retries;
+	uint32_t		xb_page_found;
+	uint32_t		xb_get_read;
 /* Version 2 btree counters */
 #define XFSSTAT_END_ABTB_V2		(XFSSTAT_END_BUF + __XBTS_MAX)
-	__uint32_t		xs_abtb_2[__XBTS_MAX];
+	uint32_t		xs_abtb_2[__XBTS_MAX];
 #define XFSSTAT_END_ABTC_V2		(XFSSTAT_END_ABTB_V2 + __XBTS_MAX)
-	__uint32_t		xs_abtc_2[__XBTS_MAX];
+	uint32_t		xs_abtc_2[__XBTS_MAX];
 #define XFSSTAT_END_BMBT_V2		(XFSSTAT_END_ABTC_V2 + __XBTS_MAX)
-	__uint32_t		xs_bmbt_2[__XBTS_MAX];
+	uint32_t		xs_bmbt_2[__XBTS_MAX];
 #define XFSSTAT_END_IBT_V2		(XFSSTAT_END_BMBT_V2 + __XBTS_MAX)
-	__uint32_t		xs_ibt_2[__XBTS_MAX];
+	uint32_t		xs_ibt_2[__XBTS_MAX];
 #define XFSSTAT_END_FIBT_V2		(XFSSTAT_END_IBT_V2 + __XBTS_MAX)
-	__uint32_t		xs_fibt_2[__XBTS_MAX];
+	uint32_t		xs_fibt_2[__XBTS_MAX];
 #define XFSSTAT_END_RMAP_V2		(XFSSTAT_END_FIBT_V2 + __XBTS_MAX)
-	__uint32_t		xs_rmap_2[__XBTS_MAX];
+	uint32_t		xs_rmap_2[__XBTS_MAX];
 #define XFSSTAT_END_REFCOUNT		(XFSSTAT_END_RMAP_V2 + __XBTS_MAX)
-	__uint32_t		xs_refcbt_2[__XBTS_MAX];
+	uint32_t		xs_refcbt_2[__XBTS_MAX];
 #define XFSSTAT_END_XQMSTAT		(XFSSTAT_END_REFCOUNT + 6)
-	__uint32_t		xs_qm_dqreclaims;
-	__uint32_t		xs_qm_dqreclaim_misses;
-	__uint32_t		xs_qm_dquot_dups;
-	__uint32_t		xs_qm_dqcachemisses;
-	__uint32_t		xs_qm_dqcachehits;
-	__uint32_t		xs_qm_dqwants;
+	uint32_t		xs_qm_dqreclaims;
+	uint32_t		xs_qm_dqreclaim_misses;
+	uint32_t		xs_qm_dquot_dups;
+	uint32_t		xs_qm_dqcachemisses;
+	uint32_t		xs_qm_dqcachehits;
+	uint32_t		xs_qm_dqwants;
 #define XFSSTAT_END_QM			(XFSSTAT_END_XQMSTAT+2)
-	__uint32_t		xs_qm_dquot;
-	__uint32_t		xs_qm_dquot_unused;
+	uint32_t		xs_qm_dquot;
+	uint32_t		xs_qm_dquot_unused;
 /* Extra precision counters */
-	__uint64_t		xs_xstrat_bytes;
-	__uint64_t		xs_write_bytes;
-	__uint64_t		xs_read_bytes;
+	uint64_t		xs_xstrat_bytes;
+	uint64_t		xs_write_bytes;
+	uint64_t		xs_read_bytes;
 };
 
 struct xfsstats {
 
@@ -186,7 +186,7 @@ struct xfsstats {
  * simple wrapper for getting the array index of s struct member offset
  */
 #define XFS_STATS_CALC_INDEX(member)	\
-	(offsetof(struct __xfsstats, member) / (int)sizeof(__uint32_t))
+	(offsetof(struct __xfsstats, member) / (int)sizeof(uint32_t))
 
 int xfs_stats_format(struct xfsstats __percpu *stats, char *buf);

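XFS_STATS_CALC_INDEX() turns a struct __xfsstats member into its slot index within the flat array of uint32_t counters, so 64-bit counters occupy two slots. A small standalone illustration of the same offsetof arithmetic, using an invented toy struct rather than the kernel's own:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* toy stand-in for struct __xfsstats: every counter is a uint32_t slot */
	struct toy_stats {
		uint32_t	a;
		uint32_t	b;
		uint64_t	big;	/* occupies two uint32_t slots */
	};

	#define TOY_STATS_CALC_INDEX(member) \
		(offsetof(struct toy_stats, member) / (int)sizeof(uint32_t))

	int main(void)
	{
		/* prints "b -> index 1, big -> index 2" */
		printf("b -> index %d, big -> index %d\n",
		       TOY_STATS_CALC_INDEX(b), TOY_STATS_CALC_INDEX(big));
		return 0;
	}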