/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_ALLOC_H__
#define	__XFS_ALLOC_H__

struct xfs_buf;
struct xfs_btree_cur;
struct xfs_mount;
struct xfs_perag;
struct xfs_trans;

extern struct workqueue_struct *xfs_alloc_wq;

unsigned int xfs_agfl_size(struct xfs_mount *mp);

/*
 * Freespace allocation types.  Argument to xfs_alloc_[v]extent.
 */
#define XFS_ALLOCTYPE_FIRST_AG	0x02	/* ... start at ag 0 */
#define XFS_ALLOCTYPE_THIS_AG	0x08	/* anywhere in this a.g. */
#define XFS_ALLOCTYPE_START_BNO	0x10	/* near this block else anywhere */
#define XFS_ALLOCTYPE_NEAR_BNO	0x20	/* in this a.g. and near this block */
#define XFS_ALLOCTYPE_THIS_BNO	0x40	/* at exactly this block */

/* this should become an enum again when the tracing code is fixed */
typedef unsigned int xfs_alloctype_t;

#define XFS_ALLOC_TYPES \
	{ XFS_ALLOCTYPE_FIRST_AG,	"FIRST_AG" }, \
	{ XFS_ALLOCTYPE_THIS_AG,	"THIS_AG" }, \
	{ XFS_ALLOCTYPE_START_BNO,	"START_BNO" }, \
	{ XFS_ALLOCTYPE_NEAR_BNO,	"NEAR_BNO" }, \
	{ XFS_ALLOCTYPE_THIS_BNO,	"THIS_BNO" }
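
/*
 * Illustrative note (not taken from the kernel sources): the name table
 * above is laid out for symbolic decoding in the XFS tracepoints, so a raw
 * xfs_alloctype_t value is rendered as, say, "NEAR_BNO" instead of 0x20.
 * A hypothetical TP_printk() consumer might use it roughly like this,
 * where "alloctype" is an assumed trace-entry field name:
 *
 *	__print_symbolic(__entry->alloctype, XFS_ALLOC_TYPES)
 */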

/*
 * Flags for xfs_alloc_fix_freelist.
 */
#define	XFS_ALLOC_FLAG_TRYLOCK	0x00000001  /* use trylock for buffer locking */
#define	XFS_ALLOC_FLAG_FREEING	0x00000002  /* indicate caller is freeing extents */
#define	XFS_ALLOC_FLAG_NORMAP	0x00000004  /* don't modify the rmapbt */
#define	XFS_ALLOC_FLAG_NOSHRINK	0x00000008  /* don't shrink the freelist */
#define	XFS_ALLOC_FLAG_CHECK	0x00000010  /* test only, don't modify args */

/*
 * Argument structure for xfs_alloc routines.
 * This is turned into a structure to avoid having 20 arguments passed
 * down several levels of the stack.
 */
typedef struct xfs_alloc_arg {
	struct xfs_trans *tp;		/* transaction pointer */
	struct xfs_mount *mp;		/* file system mount point */
	struct xfs_buf	*agbp;		/* buffer for a.g. freelist header */
	struct xfs_perag *pag;		/* per-ag struct for this agno */
	struct xfs_inode *ip;		/* for userdata zeroing method */
	xfs_fsblock_t	fsbno;		/* file system block number */
	xfs_agnumber_t	agno;		/* allocation group number */
	xfs_agblock_t	agbno;		/* allocation group-relative block # */
	xfs_extlen_t	minlen;		/* minimum size of extent */
	xfs_extlen_t	maxlen;		/* maximum size of extent */
	xfs_extlen_t	mod;		/* mod value for extent size */
	xfs_extlen_t	prod;		/* prod value for extent size */
	xfs_extlen_t	minleft;	/* min blocks must be left after us */
	xfs_extlen_t	total;		/* total blocks needed in xaction */
	xfs_extlen_t	alignment;	/* align answer to multiple of this */
	xfs_extlen_t	minalignslop;	/* slop for minlen+alignment calcs */
	xfs_agblock_t	min_agbno;	/* set an agbno range for NEAR allocs */
	xfs_agblock_t	max_agbno;	/* ... */
	xfs_extlen_t	len;		/* output: actual size of extent */
	xfs_alloctype_t	type;		/* allocation type XFS_ALLOCTYPE_... */
	xfs_alloctype_t	otype;		/* original allocation type */
	int		datatype;	/* mask defining data type treatment */
	char		wasdel;		/* set if allocation was prev delayed */
	char		wasfromfl;	/* set if allocation is from freelist */
	xfs_fsblock_t	firstblock;	/* io first block allocated */
	struct xfs_owner_info	oinfo;	/* owner of blocks being allocated */
	enum xfs_ag_resv_type	resv;	/* block reservation to use */
} xfs_alloc_arg_t;
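
/*
 * Illustrative sketch (not lifted from any real caller): a typical
 * allocation fills the argument structure, lets xfs_alloc_vextent() do the
 * work, then inspects fsbno/len for the result.  The variable names and
 * field values below are hypothetical.
 *
 *	struct xfs_alloc_arg	args;
 *	int			error;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tp = tp;
 *	args.mp = mp;
 *	args.fsbno = target_fsbno;		   (starting hint)
 *	args.type = XFS_ALLOCTYPE_START_BNO;	   (near hint, else anywhere)
 *	args.minlen = 1;
 *	args.maxlen = count_fsb;
 *	args.prod = 1;
 *	args.alignment = 1;
 *	args.total = total_needed;
 *	args.datatype = XFS_ALLOC_USERDATA;
 *	args.resv = XFS_AG_RESV_NONE;
 *
 *	error = xfs_alloc_vextent(&args);
 *	if (error)
 *		return error;
 *	if (args.fsbno == NULLFSBLOCK)
 *		return -ENOSPC;			   (no extent long enough)
 *	new_bno = args.fsbno;
 *	new_len = args.len;
 */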

/*
 * Defines for datatype
 */
#define XFS_ALLOC_USERDATA		(1 << 0) /* allocation is for user data */
#define XFS_ALLOC_INITIAL_USER_DATA	(1 << 1) /* special case start of file */
#define XFS_ALLOC_USERDATA_ZERO		(1 << 2) /* zero extent on allocation */
#define XFS_ALLOC_NOBUSY		(1 << 3) /* busy extents not allowed */

static inline bool
xfs_alloc_is_userdata(int datatype)
{
	return (datatype & ~XFS_ALLOC_NOBUSY) != 0;
}

static inline bool
xfs_alloc_allow_busy_reuse(int datatype)
{
	return (datatype & XFS_ALLOC_NOBUSY) == 0;
}
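
/*
 * Usage sketch (hypothetical callers, not kernel code): an ordinary data
 * fork allocation is tagged as user data, while something like a remote
 * attribute block is tagged only as "no busy reuse" metadata:
 *
 *	args.datatype = XFS_ALLOC_USERDATA;		(file data)
 *	args.datatype = XFS_ALLOC_NOBUSY;		(ordered metadata)
 *
 *	xfs_alloc_is_userdata(XFS_ALLOC_USERDATA)	-> true
 *	xfs_alloc_is_userdata(XFS_ALLOC_NOBUSY)		-> false
 *	xfs_alloc_allow_busy_reuse(XFS_ALLOC_USERDATA)	-> true
 *	xfs_alloc_allow_busy_reuse(XFS_ALLOC_NOBUSY)	-> false
 */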

/* freespace limit calculations */
#define XFS_ALLOC_AGFL_RESERVE	4
unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);

xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_perag *pag,
		xfs_extlen_t need, xfs_extlen_t reserved);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
		struct xfs_perag *pag);

/*
 * Compute and fill in value of m_ag_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	struct xfs_mount	*mp);	/* file system mount structure */

/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */
int				/* error */
xfs_alloc_get_freelist(
	struct xfs_trans *tp,	/* transaction pointer */
	struct xfs_buf	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
	int		btreeblk); /* destination is an AGF btree */

/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
	struct xfs_trans *tp,	/* transaction pointer */
	struct xfs_buf	*bp,	/* buffer for a.g. freelist header */
	int		fields);/* mask of fields to be logged (XFS_AGF_...) */

/*
 * Interface for inode allocation to force the pag data to be initialized.
 */
int				/* error */
xfs_alloc_pagf_init(
	struct xfs_mount *mp,	/* file system mount structure */
	struct xfs_trans *tp,	/* transaction pointer */
	xfs_agnumber_t	agno,	/* allocation group number */
	int		flags);	/* XFS_ALLOC_FLAG_... */

/*
 * Put the block on the freelist for the allocation group.
 */
int				/* error */
xfs_alloc_put_freelist(
	struct xfs_trans *tp,	/* transaction pointer */
	struct xfs_buf	*agbp,	/* buffer for a.g. freelist header */
	struct xfs_buf	*agflbp,/* buffer for a.g. free block array */
	xfs_agblock_t	bno,	/* block being freed */
	int		btreeblk); /* owner was an AGF btree */

/*
 * Read in the allocation group header (free/alloc section).
 */
int					/* error */
xfs_alloc_read_agf(
	struct xfs_mount *mp,		/* mount point structure */
	struct xfs_trans *tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	int		flags,		/* XFS_ALLOC_FLAG_... */
	struct xfs_buf	**bpp);		/* buffer for the ag freelist header */

/*
 * Allocate an extent (variable-size).
 */
int				/* error */
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args);	/* allocation argument structure */

/*
 * Free an extent.
 */
int				/* error */
xfs_free_extent(
	struct xfs_trans *tp,	/* transaction pointer */
	xfs_fsblock_t	bno,	/* starting block number of extent */
	xfs_extlen_t	len,	/* length of extent */
	struct xfs_owner_info	*oinfo,	/* extent owner */
	enum xfs_ag_resv_type	type);	/* block reservation type */
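
/*
 * Usage sketch (hypothetical, not copied from a real caller): freeing an
 * extent that belongs to an inode's data fork might look roughly like the
 * following, assuming the xfs_rmap_ino_owner() helper from xfs_rmap.h and
 * a bmap record "del" describing the extent:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_owner(&oinfo, ip->i_ino, XFS_DATA_FORK, offset_fsb);
 *	error = xfs_free_extent(tp, del->br_startblock, del->br_blockcount,
 *				&oinfo, XFS_AG_RESV_NONE);
 */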

int				/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat);	/* success/failure */

int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat);	/* success/failure */

int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat);	/* output: success/failure */
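
/*
 * Usage sketch (hypothetical): on a by-block (bnobt) cursor, finding the
 * first free extent that starts at or after a given agbno is a lookup
 * followed by a record read; stat == 1 means a record was found.
 *
 *	error = xfs_alloc_lookup_ge(cur, start_agbno, 0, &stat);
 *	if (error || !stat)
 *		goto out;
 *	error = xfs_alloc_get_rec(cur, &fbno, &flen, &stat);
 *	if (error || !stat)
 *		goto out;
 *	(fbno/flen now describe the free extent)
 */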

int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
			xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
int xfs_alloc_read_agfl(struct xfs_mount *mp, struct xfs_trans *tp,
			xfs_agnumber_t agno, struct xfs_buf **bpp);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
		struct xfs_buf **agbp);

xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);

typedef int (*xfs_alloc_query_range_fn)(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv);

int xfs_alloc_query_range(struct xfs_btree_cur *cur,
		struct xfs_alloc_rec_incore *low_rec,
		struct xfs_alloc_rec_incore *high_rec,
		xfs_alloc_query_range_fn fn, void *priv);
int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
		void *priv);
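
/*
 * Callback sketch (hypothetical): a query callback matches the
 * xfs_alloc_query_range_fn signature above and returns 0 to keep
 * iterating.  Something like the following could total up the free space
 * visited by xfs_alloc_query_all():
 *
 *	STATIC int
 *	xfs_count_free_helper(
 *		struct xfs_btree_cur		*cur,
 *		struct xfs_alloc_rec_incore	*rec,
 *		void				*priv)
 *	{
 *		xfs_extlen_t	*freeblks = priv;
 *
 *		*freeblks += rec->ar_blockcount;
 *		return 0;
 *	}
 */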

xfs_agblock_t xfs_ag_block_count(struct xfs_mount *mp, xfs_agnumber_t agno);
bool xfs_verify_agbno(struct xfs_mount *mp, xfs_agnumber_t agno,
		xfs_agblock_t agbno);
bool xfs_verify_fsbno(struct xfs_mount *mp, xfs_fsblock_t fsbno);

int xfs_alloc_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
		xfs_extlen_t len, bool *exist);

#endif /* __XFS_ALLOC_H__ */