2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2006-06-19 06:40:27 +08:00
|
|
|
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
|
2005-11-02 11:58:39 +08:00
|
|
|
* All Rights Reserved.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2005-11-02 11:58:39 +08:00
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License as
|
2005-04-17 06:20:36 +08:00
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
2005-11-02 11:58:39 +08:00
|
|
|
* This program is distributed in the hope that it would be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2005-11-02 11:58:39 +08:00
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
#include "xfs.h"
|
2005-11-02 11:38:42 +08:00
|
|
|
#include "xfs_bit.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "xfs_log.h"
|
|
|
|
#include "xfs_clnt.h"
|
2005-11-02 11:38:42 +08:00
|
|
|
#include "xfs_inum.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "xfs_trans.h"
|
|
|
|
#include "xfs_sb.h"
|
2005-11-02 11:38:42 +08:00
|
|
|
#include "xfs_ag.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "xfs_dir2.h"
|
|
|
|
#include "xfs_alloc.h"
|
|
|
|
#include "xfs_dmapi.h"
|
|
|
|
#include "xfs_quota.h"
|
|
|
|
#include "xfs_mount.h"
|
|
|
|
#include "xfs_bmap_btree.h"
|
2005-11-02 11:38:42 +08:00
|
|
|
#include "xfs_alloc_btree.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "xfs_ialloc_btree.h"
|
|
|
|
#include "xfs_dir2_sf.h"
|
2005-11-02 11:38:42 +08:00
|
|
|
#include "xfs_attr_sf.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "xfs_dinode.h"
|
|
|
|
#include "xfs_inode.h"
|
2005-11-02 11:38:42 +08:00
|
|
|
#include "xfs_btree.h"
|
|
|
|
#include "xfs_ialloc.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "xfs_bmap.h"
|
|
|
|
#include "xfs_rtalloc.h"
|
|
|
|
#include "xfs_error.h"
|
|
|
|
#include "xfs_itable.h"
|
|
|
|
#include "xfs_rw.h"
|
|
|
|
#include "xfs_acl.h"
|
|
|
|
#include "xfs_attr.h"
|
|
|
|
#include "xfs_buf_item.h"
|
|
|
|
#include "xfs_utils.h"
|
|
|
|
#include "xfs_version.h"
|
|
|
|
|
|
|
|
#include <linux/namei.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/mount.h>
|
2005-09-02 14:58:49 +08:00
|
|
|
#include <linux/mempool.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/writeback.h>
|
2005-09-05 06:34:18 +08:00
|
|
|
#include <linux/kthread.h>
|
2006-12-07 12:34:23 +08:00
|
|
|
#include <linux/freezer.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-02-10 15:34:56 +08:00
|
|
|
/*
 * Operation tables handed to the VFS; presumably populated later in this
 * file (definitions not visible in this chunk) — TODO confirm.
 */
static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
/* Slab caches: vnode wrappers and I/O-end (ioend) completion items. */
static kmem_zone_t *xfs_vnode_zone;
static kmem_zone_t *xfs_ioend_zone;
/* Mempool backed by xfs_ioend_zone (see xfs_init_zones); non-static, so
 * shared with other XFS files. */
mempool_t *xfs_ioend_pool;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
STATIC struct xfs_mount_args *
|
|
|
|
xfs_args_allocate(
|
2006-03-31 11:04:17 +08:00
|
|
|
struct super_block *sb,
|
|
|
|
int silent)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct xfs_mount_args *args;
|
|
|
|
|
|
|
|
args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
|
|
|
|
args->logbufs = args->logbufsize = -1;
|
|
|
|
strncpy(args->fsname, sb->s_id, MAXNAMELEN);
|
|
|
|
|
|
|
|
/* Copy the already-parsed mount(2) flags we're interested in */
|
|
|
|
if (sb->s_flags & MS_DIRSYNC)
|
|
|
|
args->flags |= XFSMNT_DIRSYNC;
|
|
|
|
if (sb->s_flags & MS_SYNCHRONOUS)
|
|
|
|
args->flags |= XFSMNT_WSYNC;
|
2006-03-31 11:04:17 +08:00
|
|
|
if (silent)
|
|
|
|
args->flags |= XFSMNT_QUIET;
|
2005-04-17 06:20:36 +08:00
|
|
|
args->flags |= XFSMNT_32BITINODES;
|
|
|
|
|
|
|
|
return args;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Compute the largest byte offset representable for a file on this
 * kernel configuration and filesystem block size (blockshift =
 * log2 of the block size).
 */
__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	/* 64-bit sector_t: the page-index limit is the only constraint. */
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	/* Sub-page blocks shrink the reachable range proportionally. */
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
|
|
|
|
|
2007-02-10 15:34:56 +08:00
|
|
|
/*
 * Install the inode/file/address-space operation vectors appropriate
 * for the inode's file type (taken from i_mode).
 */
STATIC_INLINE void
xfs_set_inodeops(
	struct inode		*inode)
{
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		/* Only out-of-line symlink targets (i_blocks != 0) need
		 * the page-cache a_ops; inline targets do not. */
		if (inode->i_blocks)
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		/* Device nodes, FIFOs, sockets: let the core VFS wire up
		 * the type-specific file operations. */
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
|
|
|
|
|
2007-02-10 15:34:56 +08:00
|
|
|
/*
 * Copy the on-disk (xfs_inode) attributes into the Linux inode so the
 * VFS view matches the XFS view, then clear VMODIFIED on the vnode.
 */
STATIC_INLINE void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	bhv_vnode_t		*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = vn_to_inode(vp);

	inode->i_mode = ip->i_d.di_mode;
	inode->i_nlink = ip->i_d.di_nlink;
	inode->i_uid = ip->i_d.di_uid;
	inode->i_gid = ip->i_d.di_gid;

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		/* Device numbers are stored on disk in SysV encoding;
		 * translate to the kernel's dev_t. */
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	/* Account delayed-allocation blocks too, converted to 512b units. */
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
	/* Mirror each XFS DIFLAG onto the corresponding VFS S_* flag,
	 * clearing it when the DIFLAG is absent. */
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	vp->v_flag &= ~VMODIFIED;
}
|
|
|
|
|
|
|
|
/*
 * Attach the XFS inode behavior to a vnode and, once the inode is fully
 * set up, populate the Linux inode and release the I_NEW hold.
 */
void
xfs_initialize_vnode(
	bhv_desc_t		*bdp,
	bhv_vnode_t		*vp,
	bhv_desc_t		*inode_bhv,
	int			unlock)
{
	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
	struct inode		*inode = vn_to_inode(vp);

	/* First call for this inode: link its behavior descriptor into
	 * the vnode's behavior chain. */
	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish our work.
	 */
	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
		xfs_set_inodeops(inode);

		xfs_iflags_clear(ip, XFS_INEW);
		/* Ensure XFS_INEW is visible as cleared before other CPUs
		 * can see the inode via unlock_new_inode(). */
		barrier();

		unlock_new_inode(inode);
	}
}
|
|
|
|
|
|
|
|
int
|
|
|
|
xfs_blkdev_get(
|
|
|
|
xfs_mount_t *mp,
|
|
|
|
const char *name,
|
|
|
|
struct block_device **bdevp)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
*bdevp = open_bdev_excl(name, 0, mp);
|
|
|
|
if (IS_ERR(*bdevp)) {
|
|
|
|
error = PTR_ERR(*bdevp);
|
|
|
|
printk("XFS: Invalid device [%s], error=%d\n", name, error);
|
|
|
|
}
|
|
|
|
|
|
|
|
return -error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release a block device obtained via xfs_blkdev_get(); a NULL @bdev
 * (device never opened) is a no-op.
 */
void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (!bdev)
		return;
	close_bdev_excl(bdev);
}
|
|
|
|
|
2005-11-02 07:26:59 +08:00
|
|
|
/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
	int		error;

	/* Turn the superblock buffer into a synchronous, ordered
	 * (barrier) write. */
	XFS_BUF_UNDONE(sbp);
	XFS_BUF_UNREAD(sbp);
	XFS_BUF_UNDELAYWRITE(sbp);
	XFS_BUF_WRITE(sbp);
	XFS_BUF_UNASYNC(sbp);
	XFS_BUF_ORDERED(sbp);

	xfsbdstrat(mp, sbp);
	error = xfs_iowait(sbp);

	/*
	 * Clear all the flags we set and possible error state in the
	 * buffer.  We only did the write to try out whether barriers
	 * worked and shouldn't leave any traces in the superblock
	 * buffer.
	 */
	XFS_BUF_DONE(sbp);
	XFS_BUF_ERROR(sbp, 0);
	XFS_BUF_UNORDERED(sbp);

	xfs_buf_relse(sbp);
	return error;
}
|
|
|
|
|
|
|
|
/*
 * Decide at mount time whether write barriers can be used; if any of
 * the preconditions fail (or a trial barrier write fails), clear
 * XFS_MOUNT_BARRIER and log why.
 */
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
	int error;

	/* Barriers only order writes on a single device. */
	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported with external log device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
					QUEUE_ORDERED_NONE) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, not supported by the underlying device");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, underlying device is readonly");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}

	/* Preconditions hold; confirm with an actual barrier write. */
	error = xfs_barrier_test(mp);
	if (error) {
		xfs_fs_cmn_err(CE_NOTE, mp,
		  "Disabling barriers, trial barrier write failed");
		mp->m_flags &= ~XFS_MOUNT_BARRIER;
		return;
	}
}
|
|
|
|
|
|
|
|
void
|
|
|
|
xfs_blkdev_issue_flush(
|
|
|
|
xfs_buftarg_t *buftarg)
|
|
|
|
{
|
2006-01-11 12:39:08 +08:00
|
|
|
blkdev_issue_flush(buftarg->bt_bdev, NULL);
|
2005-11-02 07:26:59 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
STATIC struct inode *
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_alloc_inode(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb)
|
|
|
|
{
|
2006-06-09 15:00:52 +08:00
|
|
|
bhv_vnode_t *vp;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-14 10:18:19 +08:00
|
|
|
vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
|
|
|
|
if (unlikely(!vp))
|
2005-04-17 06:20:36 +08:00
|
|
|
return NULL;
|
2006-03-17 14:25:36 +08:00
|
|
|
return vn_to_inode(vp);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_destroy_inode(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct inode *inode)
|
|
|
|
{
|
2006-03-17 14:25:36 +08:00
|
|
|
kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_inode_init_once(
|
2006-03-14 10:18:19 +08:00
|
|
|
void *vnode,
|
|
|
|
kmem_zone_t *zonep,
|
2005-04-17 06:20:36 +08:00
|
|
|
unsigned long flags)
|
|
|
|
{
|
2007-05-17 13:10:57 +08:00
|
|
|
inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create the slab caches and the ioend mempool used by this module.
 * Returns 0 on success or -ENOMEM, unwinding partial setup via gotos.
 */
STATIC int
xfs_init_zones(void)
{
	xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode",
					KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
					KM_ZONE_SPREAD,
					xfs_fs_inode_init_once);
	if (!xfs_vnode_zone)
		goto out;

	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out_destroy_vnode_zone;

	/* Reserve a minimum of ioends so writeback can make progress
	 * even under memory pressure. */
	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_free_ioend_zone;
	return 0;

 out_free_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
	kmem_zone_destroy(xfs_vnode_zone);
 out:
	return -ENOMEM;
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 10:18:19 +08:00
|
|
|
xfs_destroy_zones(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-09-02 14:58:49 +08:00
|
|
|
mempool_destroy(xfs_ioend_pool);
|
|
|
|
kmem_zone_destroy(xfs_vnode_zone);
|
|
|
|
kmem_zone_destroy(xfs_ioend_zone);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
	struct inode		*inode,
	int			sync)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	int			error = 0, flags = FLUSH_INODE;

	if (vp) {
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		if (sync) {
			/* Wait for outstanding data I/O before the
			 * synchronous inode flush. */
			filemap_fdatawait(inode->i_mapping);
			flags |= FLUSH_SYNC;
		}
		error = bhv_vop_iflush(vp, flags);
		/* EAGAIN means the inode is pinned; for a sync writeout
		 * force the log as well, otherwise ignore it. */
		if (error == EAGAIN)
			error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
	}
	/* VFS expects negative errnos; XFS errors are positive. */
	return -error;
}
|
|
|
|
|
|
|
|
/*
 * super_operations.clear_inode: deactivate and reclaim the XFS side of
 * an inode being evicted by the VFS.
 */
STATIC void
xfs_fs_clear_inode(
	struct inode		*inode)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);

	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_INC(vn_reclaim);
	XFS_STATS_DEC(vn_active);

	/*
	 * This can happen because xfs_iget_core calls xfs_idestroy if we
	 * find an inode with di_mode == 0 but without IGET_CREATE set.
	 */
	if (VNHEAD(vp))
		bhv_vop_inactive(vp, NULL);

	VN_LOCK(vp);
	vp->v_flag &= ~VMODIFIED;
	VN_UNLOCK(vp, 0);

	/* Reclaim must succeed; a failure here would leave XFS state
	 * attached to a dead VFS inode. */
	if (VNHEAD(vp))
		if (bhv_vop_reclaim(vp))
			panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);

	ASSERT(VNHEAD(vp) == NULL);

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
#endif
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Enqueue a work item to be picked up by the vfs xfssyncd thread.
|
|
|
|
* Doing this has two advantages:
|
|
|
|
* - It saves on stack space, which is tight in certain situations
|
|
|
|
* - It can be used (with care) as a mechanism to avoid deadlocks.
|
|
|
|
* Flushing while allocating in a full filesystem requires both.
|
|
|
|
*/
|
|
|
|
STATIC void
|
|
|
|
xfs_syncd_queue_work(
|
2006-06-09 14:48:30 +08:00
|
|
|
struct bhv_vfs *vfs,
|
2005-04-17 06:20:36 +08:00
|
|
|
void *data,
|
2006-06-09 14:48:30 +08:00
|
|
|
void (*syncer)(bhv_vfs_t *, void *))
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
struct bhv_vfs_sync_work *work;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-06-09 14:48:30 +08:00
|
|
|
work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
|
2005-04-17 06:20:36 +08:00
|
|
|
INIT_LIST_HEAD(&work->w_list);
|
|
|
|
work->w_syncer = syncer;
|
|
|
|
work->w_data = data;
|
|
|
|
work->w_vfs = vfs;
|
|
|
|
spin_lock(&vfs->vfs_sync_lock);
|
|
|
|
list_add_tail(&work->w_list, &vfs->vfs_sync_list);
|
|
|
|
spin_unlock(&vfs->vfs_sync_lock);
|
|
|
|
wake_up_process(vfs->vfs_sync_task);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush delayed allocate data, attempting to free up reserved space
|
|
|
|
* from existing allocations. At this point a new allocation attempt
|
|
|
|
* has failed with ENOSPC and we are in the process of scratching our
|
|
|
|
* heads, looking about for more room...
|
|
|
|
*/
|
|
|
|
STATIC void
|
|
|
|
xfs_flush_inode_work(
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_t *vfs,
|
2005-04-17 06:20:36 +08:00
|
|
|
void *inode)
|
|
|
|
{
|
|
|
|
filemap_flush(((struct inode *)inode)->i_mapping);
|
|
|
|
iput((struct inode *)inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
xfs_flush_inode(
|
|
|
|
xfs_inode_t *ip)
|
|
|
|
{
|
2006-03-17 14:25:36 +08:00
|
|
|
struct inode *inode = vn_to_inode(XFS_ITOV(ip));
|
2006-06-09 14:48:30 +08:00
|
|
|
struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
igrab(inode);
|
|
|
|
xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
|
2005-09-10 15:27:23 +08:00
|
|
|
delay(msecs_to_jiffies(500));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is the "bigger hammer" version of xfs_flush_inode_work...
|
|
|
|
* (IOW, "If at first you don't succeed, use a Bigger Hammer").
|
|
|
|
*/
|
|
|
|
STATIC void
|
|
|
|
xfs_flush_device_work(
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_t *vfs,
|
2005-04-17 06:20:36 +08:00
|
|
|
void *inode)
|
|
|
|
{
|
|
|
|
sync_blockdev(vfs->vfs_super->s_bdev);
|
|
|
|
iput((struct inode *)inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
xfs_flush_device(
|
|
|
|
xfs_inode_t *ip)
|
|
|
|
{
|
2006-03-17 14:25:36 +08:00
|
|
|
struct inode *inode = vn_to_inode(XFS_ITOV(ip));
|
2006-06-09 14:48:30 +08:00
|
|
|
struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
igrab(inode);
|
|
|
|
xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
|
2005-09-10 15:27:23 +08:00
|
|
|
delay(msecs_to_jiffies(500));
|
2005-04-17 06:20:36 +08:00
|
|
|
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Periodic sync work run from the xfssyncd thread: push dirty data and
 * metadata for a read-write mount, then signal anyone waiting on a
 * single sync iteration (see xfs_fs_sync_super's laptop-mode path).
 */
STATIC void
vfs_sync_worker(
	bhv_vfs_t	*vfsp,
	void		*unused)
{
	int		error;

	/* Best-effort background sync; the result is deliberately
	 * ignored (error is only assigned, never read). */
	if (!(vfsp->vfs_flag & VFS_RDONLY))
		error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
					SYNC_ATTR | SYNC_REFCACHE | SYNC_SUPER,
					NULL);
	vfsp->vfs_sync_seq++;
	wake_up(&vfsp->vfs_wait_single_sync_task);
}
|
|
|
|
|
|
|
|
/*
 * Main loop of the per-mount xfssyncd kernel thread: sleep for the
 * sync interval (or until woken), drain queued work items, and requeue
 * the built-in periodic sync work when the timeout expires.
 */
STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	bhv_vfs_t		*vfsp = (bhv_vfs_t *) arg;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		/* Only exit once the queue has been fully drained. */
		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		/* Move everything to a private list so work can run
		 * without holding vfs_sync_lock. */
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			/* The embedded periodic work item is never freed. */
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct bhv_vfs_sync_work));
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_start_syncd(
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_t *vfsp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-09-05 06:34:18 +08:00
|
|
|
vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
|
|
|
|
vfsp->vfs_sync_work.w_vfs = vfsp;
|
|
|
|
vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
|
|
|
|
if (IS_ERR(vfsp->vfs_sync_task))
|
|
|
|
return -PTR_ERR(vfsp->vfs_sync_task);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_stop_syncd(
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_t *vfsp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-09-05 06:34:18 +08:00
|
|
|
kthread_stop(vfsp->vfs_sync_task);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_put_super(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_t *vfsp = vfs_from_sb(sb);
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_stop_syncd(vfsp);
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
|
|
|
|
error = bhv_vfs_unmount(vfsp, 0, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error) {
|
2006-06-09 14:48:30 +08:00
|
|
|
printk("XFS: unmount got error=%d\n", error);
|
|
|
|
printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
|
|
|
|
} else {
|
|
|
|
vfs_deallocate(vfsp);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_write_super(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
if (!(sb->s_flags & MS_RDONLY))
|
|
|
|
bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
sb->s_dirt = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * super_operations.sync_fs: sync the filesystem, with special handling
 * for the first freeze stage and for laptop mode.
 */
STATIC int
xfs_fs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	int			error;
	int			flags;

	if (unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
		/*
		 * First stage of freeze - no more writers will make progress
		 * now we are here, so we flush delwri and delalloc buffers
		 * here, then wait for all I/O to complete.  Data is frozen at
		 * that point. Metadata is not frozen, transactions can still
		 * occur here so don't bother flushing the buftarg (i.e
		 * SYNC_QUIESCE) because it'll just get dirty again.
		 */
		flags = SYNC_DATA_QUIESCE;
	} else
		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

	error = bhv_vfs_sync(vfsp, flags, NULL);
	sb->s_dirt = 0;

	if (unlikely(laptop_mode)) {
		int prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	/* VFS expects negative errnos; XFS errors are positive. */
	return -error;
}
|
|
|
|
|
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_statfs(
|
2006-06-23 17:02:58 +08:00
|
|
|
struct dentry *dentry,
|
2005-04-17 06:20:36 +08:00
|
|
|
struct kstatfs *statp)
|
|
|
|
{
|
2006-06-23 17:02:59 +08:00
|
|
|
return -bhv_vfs_statvfs(vfs_from_sb(dentry->d_sb), statp,
|
|
|
|
vn_from_inode(dentry->d_inode));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * super_operations.remount_fs: re-parse the mount options and apply the
 * update.  The temporary args structure is always freed before return.
 */
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	bhv_vfs_t		*vfsp = vfs_from_sb(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, 0);
	int			error;

	error = bhv_vfs_parseargs(vfsp, options, args, 1);
	if (!error)
		error = bhv_vfs_mntupdate(vfsp, flags, args);
	kmem_free(args, sizeof(*args));
	/* VFS expects negative errnos; XFS errors are positive. */
	return -error;
}
|
|
|
|
|
|
|
|
STATIC void
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_lockfs(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
bhv_vfs_freeze(vfs_from_sb(sb));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_show_options(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct seq_file *m,
|
|
|
|
struct vfsmount *mnt)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2005-11-02 07:32:38 +08:00
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_quotasync(
|
2005-11-02 07:32:38 +08:00
|
|
|
struct super_block *sb,
|
|
|
|
int type)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
|
2005-11-02 07:32:38 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_getxstate(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb,
|
|
|
|
struct fs_quota_stat *fqs)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_setxstate(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb,
|
|
|
|
unsigned int flags,
|
|
|
|
int op)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_getxquota(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb,
|
|
|
|
int type,
|
|
|
|
qid_t id,
|
|
|
|
struct fs_disk_quota *fdq)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
return -bhv_vfs_quotactl(vfs_from_sb(sb),
|
|
|
|
(type == USRQUOTA) ? Q_XGETQUOTA :
|
|
|
|
((type == GRPQUOTA) ? Q_XGETGQUOTA :
|
|
|
|
Q_XGETPQUOTA), id, (caddr_t)fdq);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_setxquota(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct super_block *sb,
|
|
|
|
int type,
|
|
|
|
qid_t id,
|
|
|
|
struct fs_disk_quota *fdq)
|
|
|
|
{
|
2006-06-09 14:48:30 +08:00
|
|
|
return -bhv_vfs_quotactl(vfs_from_sb(sb),
|
|
|
|
(type == USRQUOTA) ? Q_XSETQLIM :
|
|
|
|
((type == GRPQUOTA) ? Q_XSETGQLIM :
|
|
|
|
Q_XSETPQLIM), id, (caddr_t)fdq);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fill in a superblock at mount time: parse the option string, mount
 * the XFS behaviour chain, copy geometry into the VFS superblock,
 * instantiate the root dentry, and start the background sync daemon.
 *
 * Error handling cascades through labels: each later failure unwinds
 * everything the earlier steps set up (root vnode, mount, vfs/args
 * allocations).  Returns 0 on success, a negative errno on failure.
 */
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct bhv_vnode	*rootvp;
	struct bhv_vfs		*vfsp = vfs_allocate(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb, silent);
	struct kstatfs		statvfs;
	int			error;

	bhv_insert_all_vfsops(vfsp);

	/* data is the raw mount option string from the mount syscall */
	error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
	sb->s_export_op = &xfs_export_operations;
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_op = &xfs_super_operations;

	error = bhv_vfs_mount(vfsp, args, NULL);
	if (error) {
		/* mount failed: behaviours must be removed by hand here */
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	/* statvfs gives us the block size/magic to publish in the sb */
	error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	/* f_bsize is a power of two, so ffs()-1 yields log2(blocksize) */
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = bhv_vfs_root(vfsp, &rootvp);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(vn_to_inode(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	if ((error = xfs_fs_start_syncd(vfsp)))
		goto fail_vnrele;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	/*
	 * Drop the root reference exactly once: via dput() when the
	 * dentry owns it, otherwise directly via VN_RELE().
	 */
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	/* behaviour-layer errnos are positive; Linux wants them negative */
	return -error;
}
|
|
|
|
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 17:02:57 +08:00
|
|
|
STATIC int
|
2006-03-14 11:06:18 +08:00
|
|
|
xfs_fs_get_sb(
|
2005-04-17 06:20:36 +08:00
|
|
|
struct file_system_type *fs_type,
|
|
|
|
int flags,
|
|
|
|
const char *dev_name,
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 17:02:57 +08:00
|
|
|
void *data,
|
|
|
|
struct vfsmount *mnt)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 17:02:57 +08:00
|
|
|
return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
|
|
|
|
mnt);
|
2006-03-14 11:06:18 +08:00
|
|
|
}
|
|
|
|
|
2007-02-10 15:34:56 +08:00
|
|
|
/* Superblock operations exported to the VFS for every XFS mount. */
static struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.write_inode		= xfs_fs_write_inode,
	.clear_inode		= xfs_fs_clear_inode,
	.put_super		= xfs_fs_put_super,
	.write_super		= xfs_fs_write_super,
	.sync_fs		= xfs_fs_sync_super,
	.write_super_lockfs	= xfs_fs_lockfs,	/* freeze for snapshots */
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
};
|
|
|
|
|
2007-02-10 15:34:56 +08:00
|
|
|
/* quotactl(2) operations; XFS uses only the XQM-style (Q_X*) interface. */
static struct quotactl_ops xfs_quotactl_operations = {
	.quota_sync		= xfs_fs_quotasync,
	.get_xstate		= xfs_fs_getxstate,
	.set_xstate		= xfs_fs_setxstate,
	.get_xquota		= xfs_fs_getxquota,
	.set_xquota		= xfs_fs_setxquota,
};
|
|
|
|
|
2007-02-21 05:57:47 +08:00
|
|
|
/* Registration record for the "xfs" filesystem type. */
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= xfs_fs_get_sb,
	.kill_sb		= kill_block_super,
	/* XFS always requires a backing block device */
	.fs_flags		= FS_REQUIRES_DEV,
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Module init: set up XFS global state in dependency order (memory
 * zones, buffer cache, vnode/core/uuid/quota subsystems), then
 * register the filesystem type with the VFS.
 *
 * On failure the undo_* labels cascade, tearing down each step that
 * succeeded before the failure point.  Note the labels are named after
 * the step that FAILED, so e.g. undo_buffers destroys the zones set up
 * earlier, and undo_zones just returns.
 */
STATIC int __init
init_xfs_fs( void )
{
	int			error;
	struct sysinfo		si;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	/* record total RAM; presumably used for cache sizing elsewhere */
	si_meminfo(&si);
	xfs_physmem = si.totalram;

	/*
	 * NOTE(review): ktrace_init() has no matching ktrace_uninit() on
	 * the failure paths below (exit_xfs_fs() does call it) — confirm
	 * whether that is an accepted leak on failed module load.
	 */
	ktrace_init(64);

	error = xfs_init_zones();
	if (error < 0)
		goto undo_zones;

	error = xfs_buf_init();
	if (error < 0)
		goto undo_buffers;

	vn_init();
	xfs_init();
	uuid_init();
	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	return 0;

undo_register:
	xfs_buf_terminate();

undo_buffers:
	xfs_destroy_zones();

undo_zones:
	return error;
}
|
|
|
|
|
|
|
|
/*
 * Module exit: tear down global state in the reverse order of
 * init_xfs_fs() — unregister from the VFS first so no new mounts can
 * arrive, then dismantle core, buffer, zone, and trace facilities.
 */
STATIC void __exit
exit_xfs_fs( void )
{
	vfs_exitquota();
	unregister_filesystem(&xfs_fs_type);
	xfs_cleanup();
	xfs_buf_terminate();
	xfs_destroy_zones();
	ktrace_uninit();
}
|
|
|
|
|
|
|
|
/* Module entry/exit hooks and metadata for modinfo. */
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
|