Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (27 commits)
  GFS2: Use DEFINE_SPINLOCK
  GFS2: Fix use-after-free bug on umount (try #2)
  Revert "GFS2: Fix use-after-free bug on umount"
  GFS2: Streamline alloc calculations for writes
  GFS2: Send useful information with uevent messages
  GFS2: Fix use-after-free bug on umount
  GFS2: Remove ancient, unused code
  GFS2: Move four functions from super.c
  GFS2: Fix bug in gfs2_lock_fs_check_clean()
  GFS2: Send some sensible sysfs stuff
  GFS2: Kill two daemons with one patch
  GFS2: Move gfs2_recoverd into recovery.c
  GFS2: Fix "truncate in progress" hang
  GFS2: Clean up & move gfs2_quotad
  GFS2: Add more detail to debugfs glock dumps
  GFS2: Banish struct gfs2_rgrpd_host
  GFS2: Move rg_free from gfs2_rgrpd_host to gfs2_rgrpd
  GFS2: Move rg_igeneration into struct gfs2_rgrpd
  GFS2: Banish struct gfs2_dinode_host
  GFS2: Move i_size from gfs2_dinode_host and rename it to i_disksize
  ...
This commit is contained in: commit c54febae99
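Not part of the original commit: the series above replaces GFS2's scand/glockd reclaim daemons with an LRU list of glocks drained by a memory shrinker (see gfs2_shrink_glock_memory() in the glock.c hunks below). What follows is a minimal, userspace-only sketch of that general shape, written purely as an illustration; the struct layout, the demotable flag and the shrink_glock_memory() helper are invented stand-ins, and the real kernel code uses struct shrinker, spinlocks, reference counts and glock state checks instead.

/*
 * Illustration only -- not kernel code.  Build with: cc -Wall sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

struct glock {
	struct glock *prev, *next;	/* links on the LRU list */
	int demotable;			/* stand-in for demote_ok() */
	int id;
};

/* circular list head, mirroring the static lru_list/lru_count pair */
static struct glock lru_list = { &lru_list, &lru_list, 0, -1 };
static long lru_count;

static void lru_add_tail(struct glock *gl)
{
	gl->prev = lru_list.prev;
	gl->next = &lru_list;
	lru_list.prev->next = gl;
	lru_list.prev = gl;
	lru_count++;
}

static struct glock *lru_pop_front(void)
{
	struct glock *gl = lru_list.next;

	if (gl == &lru_list)
		return NULL;		/* list is empty */
	gl->prev->next = gl->next;
	gl->next->prev = gl->prev;
	lru_count--;
	return gl;
}

/*
 * Loose analogue of gfs2_shrink_glock_memory(): scan up to nr entries from
 * the front of the LRU, free the ones that may be demoted, put a skipped
 * entry back on the tail, and report how many objects remain cached.
 */
static long shrink_glock_memory(long nr)
{
	struct glock *gl;

	while (nr > 0 && (gl = lru_pop_front()) != NULL) {
		if (gl->demotable) {
			free(gl);		/* "demote" and drop it */
			nr--;
		} else {
			lru_add_tail(gl);	/* keep it cached */
			break;			/* don't spin on unreclaimable entries */
		}
	}
	return lru_count;
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct glock *gl = malloc(sizeof(*gl));

		gl->id = i;
		gl->demotable = (i % 2 == 0);	/* pretend odd ones are still held */
		lru_add_tail(gl);
	}
	printf("glocks left on the lru after shrinking 3: %ld\n",
	       shrink_glock_memory(3));
	return 0;
}
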
@@ -1,5 +1,5 @@
 obj-$(CONFIG_GFS2_FS) += gfs2.o
-gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
+gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \
 	glops.o inode.o log.o lops.o locking.o main.o meta_io.o \
 	mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
 	ops_fstype.o ops_inode.o ops_super.o quota.o \

@@ -91,7 +91,7 @@ static int acl_get(struct gfs2_inode *ip, int access, struct posix_acl **acl,
 	struct gfs2_ea_location el_this;
 	int error;

-	if (!ip->i_di.di_eattr)
+	if (!ip->i_eattr)
 		return 0;

 	memset(&er, 0, sizeof(struct gfs2_ea_request));

@@ -75,9 +75,9 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
 	void *kaddr = kmap(page);

 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
-	       ip->i_di.di_size);
-	memset(kaddr + ip->i_di.di_size, 0,
-	       PAGE_CACHE_SIZE - ip->i_di.di_size);
+	       ip->i_disksize);
+	memset(kaddr + ip->i_disksize, 0,
+	       PAGE_CACHE_SIZE - ip->i_disksize);
 	kunmap(page);

 	SetPageUptodate(page);
@@ -132,7 +132,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
 	if (error)
 		goto out;

-	if (ip->i_di.di_size) {
+	if (ip->i_disksize) {
 		/* Get a free block, fill it with the stuffed data,
 		   and write it out to disk */

@@ -159,7 +159,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
 	di = (struct gfs2_dinode *)dibh->b_data;
 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

-	if (ip->i_di.di_size) {
+	if (ip->i_disksize) {
 		*(__be64 *)(di + 1) = cpu_to_be64(block);
 		gfs2_add_inode_blocks(&ip->i_inode, 1);
 		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
@@ -926,7 +926,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
 		}
 	}

-	ip->i_di.di_size = size;
+	ip->i_disksize = size;
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	gfs2_dinode_out(ip, dibh->b_data);
@@ -1033,7 +1033,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 		goto out;

 	if (gfs2_is_stuffed(ip)) {
-		ip->i_di.di_size = size;
+		ip->i_disksize = size;
 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 		gfs2_dinode_out(ip, dibh->b_data);
@@ -1045,9 +1045,9 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 			error = gfs2_block_truncate_page(ip->i_inode.i_mapping);

 		if (!error) {
-			ip->i_di.di_size = size;
+			ip->i_disksize = size;
 			ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-			ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
+			ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
 			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 			gfs2_dinode_out(ip, dibh->b_data);
 		}
@@ -1114,13 +1114,13 @@ static int trunc_end(struct gfs2_inode *ip)
 	if (error)
 		goto out;

-	if (!ip->i_di.di_size) {
+	if (!ip->i_disksize) {
 		ip->i_height = 0;
 		ip->i_goal = ip->i_no_addr;
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 	}
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-	ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
+	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	gfs2_dinode_out(ip, dibh->b_data);
@@ -1205,9 +1205,9 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
 	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
 		return -EINVAL;

-	if (size > ip->i_di.di_size)
+	if (size > ip->i_disksize)
 		error = do_grow(ip, size);
-	else if (size < ip->i_di.di_size)
+	else if (size < ip->i_disksize)
 		error = do_shrink(ip, size);
 	else
 		/* update time stamps */
@@ -1219,7 +1219,7 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
 int gfs2_truncatei_resume(struct gfs2_inode *ip)
 {
 	int error;
-	error = trunc_dealloc(ip, ip->i_di.di_size);
+	error = trunc_dealloc(ip, ip->i_disksize);
 	if (!error)
 		error = trunc_end(ip);
 	return error;
@@ -1230,35 +1230,6 @@ int gfs2_file_dealloc(struct gfs2_inode *ip)
 	return trunc_dealloc(ip, 0);
 }

-/**
- * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
- * @ip: the file
- * @len: the number of bytes to be written to the file
- * @data_blocks: returns the number of data blocks required
- * @ind_blocks: returns the number of indirect blocks required
- *
- */
-
-void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
-			    unsigned int *data_blocks, unsigned int *ind_blocks)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	unsigned int tmp;
-
-	if (gfs2_is_dir(ip)) {
-		*data_blocks = DIV_ROUND_UP(len, sdp->sd_jbsize) + 2;
-		*ind_blocks = 3 * (sdp->sd_max_jheight - 1);
-	} else {
-		*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
-		*ind_blocks = 3 * (sdp->sd_max_height - 1);
-	}
-
-	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
-		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
-		*ind_blocks += tmp;
-	}
-}
-
 /**
  * gfs2_write_alloc_required - figure out if a write will require an allocation
  * @ip: the file being written to
@@ -1276,6 +1247,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
 	struct buffer_head bh;
 	unsigned int shift;
 	u64 lblock, lblock_stop, size;
+	u64 end_of_file;

 	*alloc_required = 0;

@@ -1291,19 +1263,12 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,

 	*alloc_required = 1;
 	shift = sdp->sd_sb.sb_bsize_shift;
-	if (gfs2_is_dir(ip)) {
-		unsigned int bsize = sdp->sd_jbsize;
-		lblock = offset;
-		do_div(lblock, bsize);
-		lblock_stop = offset + len + bsize - 1;
-		do_div(lblock_stop, bsize);
-	} else {
-		u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift;
-		lblock = offset >> shift;
-		lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
-		if (lblock_stop > end_of_file)
-			return 0;
-	}
+	BUG_ON(gfs2_is_dir(ip));
+	end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift;
+	lblock = offset >> shift;
+	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
+	if (lblock_stop > end_of_file)
+		return 0;

 	size = (lblock_stop - lblock) << shift;
 	do {

@@ -10,10 +10,40 @@
 #ifndef __BMAP_DOT_H__
 #define __BMAP_DOT_H__

+#include "inode.h"
+
 struct inode;
 struct gfs2_inode;
 struct page;

+
+/**
+ * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
+ * @ip: the file
+ * @len: the number of bytes to be written to the file
+ * @data_blocks: returns the number of data blocks required
+ * @ind_blocks: returns the number of indirect blocks required
+ *
+ */
+
+static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
+					  unsigned int len,
+					  unsigned int *data_blocks,
+					  unsigned int *ind_blocks)
+{
+	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	unsigned int tmp;
+
+	BUG_ON(gfs2_is_dir(ip));
+	*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
+	*ind_blocks = 3 * (sdp->sd_max_height - 1);
+
+	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
+		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+		*ind_blocks += tmp;
+	}
+}
+
 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
 int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create);
 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen);
@@ -21,10 +51,6 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
 int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
 int gfs2_truncatei_resume(struct gfs2_inode *ip);
 int gfs2_file_dealloc(struct gfs2_inode *ip);
-
-void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
-			    unsigned int *data_blocks,
-			    unsigned int *ind_blocks);
 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
 			      unsigned int len, int *alloc_required);

fs/gfs2/daemon.c
@@ -1,136 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/buffer_head.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "daemon.h"
|
||||
#include "glock.h"
|
||||
#include "log.h"
|
||||
#include "quota.h"
|
||||
#include "recovery.h"
|
||||
#include "super.h"
|
||||
#include "util.h"
|
||||
|
||||
/* This uses schedule_timeout() instead of msleep() because it's good for
|
||||
the daemons to wake up more often than the timeout when unmounting so
|
||||
the user's unmount doesn't sit there forever.
|
||||
|
||||
The kthread functions used to start these daemons block and flush signals. */
|
||||
|
||||
/**
|
||||
* gfs2_glockd - Reclaim unused glock structures
|
||||
* @sdp: Pointer to GFS2 superblock
|
||||
*
|
||||
* One or more of these daemons run, reclaiming glocks on sd_reclaim_list.
|
||||
* Number of daemons can be set by user, with num_glockd mount option.
|
||||
*/
|
||||
|
||||
int gfs2_glockd(void *data)
|
||||
{
|
||||
struct gfs2_sbd *sdp = data;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
while (atomic_read(&sdp->sd_reclaim_count))
|
||||
gfs2_reclaim_glock(sdp);
|
||||
|
||||
wait_event_interruptible(sdp->sd_reclaim_wq,
|
||||
(atomic_read(&sdp->sd_reclaim_count) ||
|
||||
kthread_should_stop()));
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_recoverd - Recover dead machine's journals
|
||||
* @sdp: Pointer to GFS2 superblock
|
||||
*
|
||||
*/
|
||||
|
||||
int gfs2_recoverd(void *data)
|
||||
{
|
||||
struct gfs2_sbd *sdp = data;
|
||||
unsigned long t;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
gfs2_check_journals(sdp);
|
||||
t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
schedule_timeout_interruptible(t);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_quotad - Write cached quota changes into the quota file
|
||||
* @sdp: Pointer to GFS2 superblock
|
||||
*
|
||||
*/
|
||||
|
||||
int gfs2_quotad(void *data)
|
||||
{
|
||||
struct gfs2_sbd *sdp = data;
|
||||
unsigned long t;
|
||||
int error;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
/* Update the master statfs file */
|
||||
|
||||
t = sdp->sd_statfs_sync_time +
|
||||
gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
|
||||
|
||||
if (time_after_eq(jiffies, t)) {
|
||||
error = gfs2_statfs_sync(sdp);
|
||||
if (error &&
|
||||
error != -EROFS &&
|
||||
!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
|
||||
fs_err(sdp, "quotad: (1) error=%d\n", error);
|
||||
sdp->sd_statfs_sync_time = jiffies;
|
||||
}
|
||||
|
||||
/* Update quota file */
|
||||
|
||||
t = sdp->sd_quota_sync_time +
|
||||
gfs2_tune_get(sdp, gt_quota_quantum) * HZ;
|
||||
|
||||
if (time_after_eq(jiffies, t)) {
|
||||
error = gfs2_quota_sync(sdp);
|
||||
if (error &&
|
||||
error != -EROFS &&
|
||||
!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
|
||||
fs_err(sdp, "quotad: (2) error=%d\n", error);
|
||||
sdp->sd_quota_sync_time = jiffies;
|
||||
}
|
||||
|
||||
gfs2_quota_scan(sdp);
|
||||
|
||||
t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ;
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
schedule_timeout_interruptible(t);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __DAEMON_DOT_H__
|
||||
#define __DAEMON_DOT_H__
|
||||
|
||||
int gfs2_glockd(void *data);
|
||||
int gfs2_recoverd(void *data);
|
||||
int gfs2_quotad(void *data);
|
||||
|
||||
#endif /* __DAEMON_DOT_H__ */
|
|
@ -36,7 +36,7 @@
|
|||
* the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
|
||||
* beginning of the leaf block. The dirents reside in leaves when
|
||||
*
|
||||
* dip->i_di.di_flags & GFS2_DIF_EXHASH is true
|
||||
* dip->i_diskflags & GFS2_DIF_EXHASH is true
|
||||
*
|
||||
* Otherwise, the dirents are "linear", within a single stuffed dinode block.
|
||||
*
|
||||
|
@ -128,8 +128,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
|
|||
|
||||
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
|
||||
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
|
||||
if (ip->i_di.di_size < offset + size)
|
||||
ip->i_di.di_size = offset + size;
|
||||
if (ip->i_disksize < offset + size)
|
||||
ip->i_disksize = offset + size;
|
||||
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
|
||||
gfs2_dinode_out(ip, dibh->b_data);
|
||||
|
||||
|
@ -226,8 +226,8 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (ip->i_di.di_size < offset + copied)
|
||||
ip->i_di.di_size = offset + copied;
|
||||
if (ip->i_disksize < offset + copied)
|
||||
ip->i_disksize = offset + copied;
|
||||
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
|
||||
|
||||
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
|
||||
|
@ -277,11 +277,11 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
|
|||
int copied = 0;
|
||||
int error = 0;
|
||||
|
||||
if (offset >= ip->i_di.di_size)
|
||||
if (offset >= ip->i_disksize)
|
||||
return 0;
|
||||
|
||||
if (offset + size > ip->i_di.di_size)
|
||||
size = ip->i_di.di_size - offset;
|
||||
if (offset + size > ip->i_disksize)
|
||||
size = ip->i_disksize - offset;
|
||||
|
||||
if (!size)
|
||||
return 0;
|
||||
|
@ -755,12 +755,12 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
|
|||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
int error;
|
||||
|
||||
if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
|
||||
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
|
||||
struct gfs2_leaf *leaf;
|
||||
unsigned hsize = 1 << ip->i_depth;
|
||||
unsigned index;
|
||||
u64 ln;
|
||||
if (hsize * sizeof(u64) != ip->i_di.di_size) {
|
||||
if (hsize * sizeof(u64) != ip->i_disksize) {
|
||||
gfs2_consist_inode(ip);
|
||||
return ERR_PTR(-EIO);
|
||||
}
|
||||
|
@ -858,8 +858,8 @@ static int dir_make_exhash(struct inode *inode)
|
|||
return -ENOSPC;
|
||||
bn = bh->b_blocknr;
|
||||
|
||||
gfs2_assert(sdp, dip->i_di.di_entries < (1 << 16));
|
||||
leaf->lf_entries = cpu_to_be16(dip->i_di.di_entries);
|
||||
gfs2_assert(sdp, dip->i_entries < (1 << 16));
|
||||
leaf->lf_entries = cpu_to_be16(dip->i_entries);
|
||||
|
||||
/* Copy dirents */
|
||||
|
||||
|
@ -905,9 +905,9 @@ static int dir_make_exhash(struct inode *inode)
|
|||
for (x = sdp->sd_hash_ptrs; x--; lp++)
|
||||
*lp = cpu_to_be64(bn);
|
||||
|
||||
dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
|
||||
dip->i_disksize = sdp->sd_sb.sb_bsize / 2;
|
||||
gfs2_add_inode_blocks(&dip->i_inode, 1);
|
||||
dip->i_di.di_flags |= GFS2_DIF_EXHASH;
|
||||
dip->i_diskflags |= GFS2_DIF_EXHASH;
|
||||
|
||||
for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
|
||||
dip->i_depth = y;
|
||||
|
@ -1082,7 +1082,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
|
|||
int error = 0;
|
||||
|
||||
hsize = 1 << dip->i_depth;
|
||||
if (hsize * sizeof(u64) != dip->i_di.di_size) {
|
||||
if (hsize * sizeof(u64) != dip->i_disksize) {
|
||||
gfs2_consist_inode(dip);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -1091,7 +1091,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
|
|||
|
||||
buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
|
||||
|
||||
for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) {
|
||||
for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) {
|
||||
error = gfs2_dir_read_data(dip, (char *)buf,
|
||||
block * sdp->sd_hash_bsize,
|
||||
sdp->sd_hash_bsize, 1);
|
||||
|
@ -1370,7 +1370,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
|
|||
unsigned depth = 0;
|
||||
|
||||
hsize = 1 << dip->i_depth;
|
||||
if (hsize * sizeof(u64) != dip->i_di.di_size) {
|
||||
if (hsize * sizeof(u64) != dip->i_disksize) {
|
||||
gfs2_consist_inode(dip);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -1426,10 +1426,10 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
|
|||
int copied = 0;
|
||||
int error;
|
||||
|
||||
if (!dip->i_di.di_entries)
|
||||
if (!dip->i_entries)
|
||||
return 0;
|
||||
|
||||
if (dip->i_di.di_flags & GFS2_DIF_EXHASH)
|
||||
if (dip->i_diskflags & GFS2_DIF_EXHASH)
|
||||
return dir_e_read(inode, offset, opaque, filldir);
|
||||
|
||||
if (!gfs2_is_stuffed(dip)) {
|
||||
|
@ -1453,17 +1453,17 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
|
|||
error = PTR_ERR(dent);
|
||||
goto out;
|
||||
}
|
||||
if (dip->i_di.di_entries != g.offset) {
|
||||
if (dip->i_entries != g.offset) {
|
||||
fs_warn(sdp, "Number of entries corrupt in dir %llu, "
|
||||
"ip->i_di.di_entries (%u) != g.offset (%u)\n",
|
||||
"ip->i_entries (%u) != g.offset (%u)\n",
|
||||
(unsigned long long)dip->i_no_addr,
|
||||
dip->i_di.di_entries,
|
||||
dip->i_entries,
|
||||
g.offset);
|
||||
error = -EIO;
|
||||
goto out;
|
||||
}
|
||||
error = do_filldir_main(dip, offset, opaque, filldir, darr,
|
||||
dip->i_di.di_entries, &copied);
|
||||
dip->i_entries, &copied);
|
||||
out:
|
||||
kfree(darr);
|
||||
}
|
||||
|
@ -1612,7 +1612,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
|
|||
dent = gfs2_init_dirent(inode, dent, name, bh);
|
||||
gfs2_inum_out(nip, dent);
|
||||
dent->de_type = cpu_to_be16(type);
|
||||
if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
|
||||
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
|
||||
leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
be16_add_cpu(&leaf->lf_entries, 1);
|
||||
}
|
||||
|
@ -1621,14 +1621,14 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
|
|||
if (error)
|
||||
break;
|
||||
gfs2_trans_add_bh(ip->i_gl, bh, 1);
|
||||
ip->i_di.di_entries++;
|
||||
ip->i_entries++;
|
||||
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
|
||||
gfs2_dinode_out(ip, bh->b_data);
|
||||
brelse(bh);
|
||||
error = 0;
|
||||
break;
|
||||
}
|
||||
if (!(ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
|
||||
if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
|
||||
error = dir_make_exhash(inode);
|
||||
if (error)
|
||||
break;
|
||||
|
@ -1691,7 +1691,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
|
|||
}
|
||||
|
||||
dirent_del(dip, bh, prev, dent);
|
||||
if (dip->i_di.di_flags & GFS2_DIF_EXHASH) {
|
||||
if (dip->i_diskflags & GFS2_DIF_EXHASH) {
|
||||
struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
|
||||
u16 entries = be16_to_cpu(leaf->lf_entries);
|
||||
if (!entries)
|
||||
|
@ -1704,10 +1704,10 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (!dip->i_di.di_entries)
|
||||
if (!dip->i_entries)
|
||||
gfs2_consist_inode(dip);
|
||||
gfs2_trans_add_bh(dip->i_gl, bh, 1);
|
||||
dip->i_di.di_entries--;
|
||||
dip->i_entries--;
|
||||
dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
|
||||
gfs2_dinode_out(dip, bh->b_data);
|
||||
brelse(bh);
|
||||
|
@ -1748,7 +1748,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
|
|||
gfs2_inum_out(nip, dent);
|
||||
dent->de_type = cpu_to_be16(new_type);
|
||||
|
||||
if (dip->i_di.di_flags & GFS2_DIF_EXHASH) {
|
||||
if (dip->i_diskflags & GFS2_DIF_EXHASH) {
|
||||
brelse(bh);
|
||||
error = gfs2_meta_inode_buffer(dip, &bh);
|
||||
if (error)
|
||||
|
@ -1784,7 +1784,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
|
|||
int error = 0;
|
||||
|
||||
hsize = 1 << dip->i_depth;
|
||||
if (hsize * sizeof(u64) != dip->i_di.di_size) {
|
||||
if (hsize * sizeof(u64) != dip->i_disksize) {
|
||||
gfs2_consist_inode(dip);
|
||||
return -EIO;
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
#define __DIR_DOT_H__
|
||||
|
||||
#include <linux/dcache.h>
|
||||
#include <linux/crc32.h>
|
||||
|
||||
struct inode;
|
||||
struct gfs2_inode;
|
||||
|
|
|
@ -114,11 +114,11 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
|
|||
__be64 *eablk, *end;
|
||||
int error;
|
||||
|
||||
error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
|
||||
error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
|
||||
if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
|
||||
error = ea_foreach_i(ip, bh, ea_call, data);
|
||||
goto out;
|
||||
}
|
||||
|
@ -414,7 +414,7 @@ int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (ip->i_di.di_eattr) {
|
||||
if (ip->i_eattr) {
|
||||
struct ea_list ei = { .ei_er = er, .ei_size = 0 };
|
||||
|
||||
error = ea_foreach(ip, ea_list_i, &ei);
|
||||
|
@ -514,7 +514,7 @@ int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
|
|||
struct gfs2_ea_location el;
|
||||
int error;
|
||||
|
||||
if (!ip->i_di.di_eattr)
|
||||
if (!ip->i_eattr)
|
||||
return -ENODATA;
|
||||
|
||||
error = gfs2_ea_find(ip, er, &el);
|
||||
|
@ -741,7 +741,7 @@ static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
ip->i_di.di_eattr = bh->b_blocknr;
|
||||
ip->i_eattr = bh->b_blocknr;
|
||||
error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
|
||||
|
||||
brelse(bh);
|
||||
|
@ -935,10 +935,10 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
|
|||
int error;
|
||||
int mh_size = sizeof(struct gfs2_meta_header);
|
||||
|
||||
if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
|
||||
if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
|
||||
__be64 *end;
|
||||
|
||||
error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
|
||||
error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
|
||||
&indbh);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -972,9 +972,9 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
|
|||
gfs2_buffer_clear_tail(indbh, mh_size);
|
||||
|
||||
eablk = (__be64 *)(indbh->b_data + mh_size);
|
||||
*eablk = cpu_to_be64(ip->i_di.di_eattr);
|
||||
ip->i_di.di_eattr = blk;
|
||||
ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
|
||||
*eablk = cpu_to_be64(ip->i_eattr);
|
||||
ip->i_eattr = blk;
|
||||
ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
|
||||
gfs2_add_inode_blocks(&ip->i_inode, 1);
|
||||
|
||||
eablk++;
|
||||
|
@ -1015,7 +1015,7 @@ static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
|
||||
if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
|
||||
blks++;
|
||||
if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
|
||||
blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
|
||||
|
@ -1040,7 +1040,7 @@ int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
|
|||
struct gfs2_ea_location el;
|
||||
int error;
|
||||
|
||||
if (!ip->i_di.di_eattr) {
|
||||
if (!ip->i_eattr) {
|
||||
if (er->er_flags & XATTR_REPLACE)
|
||||
return -ENODATA;
|
||||
return ea_init(ip, er);
|
||||
|
@ -1051,7 +1051,7 @@ int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
|
|||
return error;
|
||||
|
||||
if (el.el_ea) {
|
||||
if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
|
||||
if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
|
||||
brelse(el.el_bh);
|
||||
return -EPERM;
|
||||
}
|
||||
|
@ -1145,7 +1145,7 @@ int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
|
|||
struct gfs2_ea_location el;
|
||||
int error;
|
||||
|
||||
if (!ip->i_di.di_eattr)
|
||||
if (!ip->i_eattr)
|
||||
return -ENODATA;
|
||||
|
||||
error = gfs2_ea_find(ip, er, &el);
|
||||
|
@ -1309,7 +1309,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
|
|||
|
||||
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
|
||||
|
||||
error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
|
||||
error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
@ -1388,7 +1388,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
|
|||
if (bstart)
|
||||
gfs2_free_meta(ip, bstart, blen);
|
||||
|
||||
ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
|
||||
ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
if (!error) {
|
||||
|
@ -1416,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
|
|||
struct buffer_head *dibh;
|
||||
int error;
|
||||
|
||||
rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
|
||||
rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
|
||||
if (!rgd) {
|
||||
gfs2_consist_inode(ip);
|
||||
return -EIO;
|
||||
|
@ -1432,9 +1432,9 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
|
|||
if (error)
|
||||
goto out_gunlock;
|
||||
|
||||
gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
|
||||
gfs2_free_meta(ip, ip->i_eattr, 1);
|
||||
|
||||
ip->i_di.di_eattr = 0;
|
||||
ip->i_eattr = 0;
|
||||
gfs2_add_inode_blocks(&ip->i_inode, -1);
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
|
@ -1479,7 +1479,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
|
|||
if (error)
|
||||
goto out_rindex;
|
||||
|
||||
if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
|
||||
if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
|
||||
error = ea_dealloc_indirect(ip);
|
||||
if (error)
|
||||
goto out_rindex;
|
||||
|
|
fs/gfs2/glock.c
@ -40,6 +40,7 @@
|
|||
#include "quota.h"
|
||||
#include "super.h"
|
||||
#include "util.h"
|
||||
#include "bmap.h"
|
||||
|
||||
struct gfs2_gl_hash_bucket {
|
||||
struct hlist_head hb_list;
|
||||
|
@ -61,9 +62,10 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
|
|||
|
||||
static DECLARE_RWSEM(gfs2_umount_flush_sem);
|
||||
static struct dentry *gfs2_root;
|
||||
static struct task_struct *scand_process;
|
||||
static unsigned int scand_secs = 5;
|
||||
static struct workqueue_struct *glock_workqueue;
|
||||
static LIST_HEAD(lru_list);
|
||||
static atomic_t lru_count = ATOMIC_INIT(0);
|
||||
static DEFINE_SPINLOCK(lru_lock);
|
||||
|
||||
#define GFS2_GL_HASH_SHIFT 15
|
||||
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
|
||||
|
@ -173,6 +175,22 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
|
|||
atomic_inc(&gl->gl_ref);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
|
||||
* @gl: the glock
|
||||
*
|
||||
*/
|
||||
|
||||
static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
|
||||
{
|
||||
spin_lock(&lru_lock);
|
||||
if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
|
||||
list_add_tail(&gl->gl_lru, &lru_list);
|
||||
atomic_inc(&lru_count);
|
||||
}
|
||||
spin_unlock(&lru_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_glock_put() - Decrement reference count on glock
|
||||
* @gl: The glock to put
|
||||
|
@ -187,14 +205,23 @@ int gfs2_glock_put(struct gfs2_glock *gl)
|
|||
if (atomic_dec_and_test(&gl->gl_ref)) {
|
||||
hlist_del(&gl->gl_list);
|
||||
write_unlock(gl_lock_addr(gl->gl_hash));
|
||||
spin_lock(&lru_lock);
|
||||
if (!list_empty(&gl->gl_lru)) {
|
||||
list_del_init(&gl->gl_lru);
|
||||
atomic_dec(&lru_count);
|
||||
}
|
||||
spin_unlock(&lru_lock);
|
||||
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
|
||||
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
|
||||
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru));
|
||||
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
|
||||
glock_free(gl);
|
||||
rv = 1;
|
||||
goto out;
|
||||
}
|
||||
write_unlock(gl_lock_addr(gl->gl_hash));
|
||||
/* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
|
||||
if (atomic_read(&gl->gl_ref) == 2)
|
||||
gfs2_glock_schedule_for_reclaim(gl);
|
||||
out:
|
||||
return rv;
|
||||
}
|
||||
|
@ -289,10 +316,13 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
|
|||
* do_promote - promote as many requests as possible on the current queue
|
||||
* @gl: The glock
|
||||
*
|
||||
* Returns: true if there is a blocked holder at the head of the list
|
||||
* Returns: 1 if there is a blocked holder at the head of the list, or 2
|
||||
* if a type specific operation is underway.
|
||||
*/
|
||||
|
||||
static int do_promote(struct gfs2_glock *gl)
|
||||
__releases(&gl->gl_spin)
|
||||
__acquires(&gl->gl_spin)
|
||||
{
|
||||
const struct gfs2_glock_operations *glops = gl->gl_ops;
|
||||
struct gfs2_holder *gh, *tmp;
|
||||
|
@ -310,6 +340,8 @@ static int do_promote(struct gfs2_glock *gl)
|
|||
ret = glops->go_lock(gh);
|
||||
spin_lock(&gl->gl_spin);
|
||||
if (ret) {
|
||||
if (ret == 1)
|
||||
return 2;
|
||||
gh->gh_error = ret;
|
||||
list_del_init(&gh->gh_list);
|
||||
gfs2_holder_wake(gh);
|
||||
|
@ -414,6 +446,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
|
|||
const struct gfs2_glock_operations *glops = gl->gl_ops;
|
||||
struct gfs2_holder *gh;
|
||||
unsigned state = ret & LM_OUT_ST_MASK;
|
||||
int rv;
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
state_change(gl, state);
|
||||
|
@ -468,7 +501,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
|
|||
gfs2_demote_wake(gl);
|
||||
if (state != LM_ST_UNLOCKED) {
|
||||
if (glops->go_xmote_bh) {
|
||||
int rv;
|
||||
spin_unlock(&gl->gl_spin);
|
||||
rv = glops->go_xmote_bh(gl, gh);
|
||||
if (rv == -EAGAIN)
|
||||
|
@ -479,10 +511,13 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
|
|||
goto out;
|
||||
}
|
||||
}
|
||||
do_promote(gl);
|
||||
rv = do_promote(gl);
|
||||
if (rv == 2)
|
||||
goto out_locked;
|
||||
}
|
||||
out:
|
||||
clear_bit(GLF_LOCK, &gl->gl_flags);
|
||||
out_locked:
|
||||
spin_unlock(&gl->gl_spin);
|
||||
gfs2_glock_put(gl);
|
||||
}
|
||||
|
@ -511,6 +546,8 @@ static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
|
|||
*/
|
||||
|
||||
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
|
||||
__releases(&gl->gl_spin)
|
||||
__acquires(&gl->gl_spin)
|
||||
{
|
||||
const struct gfs2_glock_operations *glops = gl->gl_ops;
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
|
@ -576,8 +613,11 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
|
|||
*/
|
||||
|
||||
static void run_queue(struct gfs2_glock *gl, const int nonblock)
|
||||
__releases(&gl->gl_spin)
|
||||
__acquires(&gl->gl_spin)
|
||||
{
|
||||
struct gfs2_holder *gh = NULL;
|
||||
int ret;
|
||||
|
||||
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
|
||||
return;
|
||||
|
@ -596,8 +636,11 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
|
|||
} else {
|
||||
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
|
||||
gfs2_demote_wake(gl);
|
||||
if (do_promote(gl) == 0)
|
||||
ret = do_promote(gl);
|
||||
if (ret == 0)
|
||||
goto out;
|
||||
if (ret == 2)
|
||||
return;
|
||||
gh = find_first_waiter(gl);
|
||||
gl->gl_target = gh->gh_state;
|
||||
if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
|
||||
|
@ -820,7 +863,7 @@ static void wait_on_demote(struct gfs2_glock *gl)
|
|||
*/
|
||||
|
||||
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
|
||||
int remote, unsigned long delay)
|
||||
unsigned long delay)
|
||||
{
|
||||
int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
|
||||
|
||||
|
@ -828,9 +871,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
|
|||
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
|
||||
gl->gl_demote_state = state;
|
||||
gl->gl_demote_time = jiffies;
|
||||
if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
|
||||
gl->gl_object)
|
||||
gfs2_glock_schedule_for_reclaim(gl);
|
||||
} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
|
||||
gl->gl_demote_state != state) {
|
||||
gl->gl_demote_state = LM_ST_UNLOCKED;
|
||||
|
@ -877,6 +917,8 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
|
|||
*/
|
||||
|
||||
static inline void add_to_queue(struct gfs2_holder *gh)
|
||||
__releases(&gl->gl_spin)
|
||||
__acquires(&gl->gl_spin)
|
||||
{
|
||||
struct gfs2_glock *gl = gh->gh_gl;
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
|
@ -998,7 +1040,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
|
|||
|
||||
spin_lock(&gl->gl_spin);
|
||||
if (gh->gh_flags & GL_NOCACHE)
|
||||
handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
|
||||
handle_callback(gl, LM_ST_UNLOCKED, 0);
|
||||
|
||||
list_del_init(&gh->gh_list);
|
||||
if (find_first_holder(gl) == NULL) {
|
||||
|
@ -1269,12 +1311,26 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
|
|||
delay = gl->gl_ops->go_min_hold_time;
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
handle_callback(gl, state, 1, delay);
|
||||
handle_callback(gl, state, delay);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
|
||||
gfs2_glock_put(gl);
|
||||
}
|
||||
|
||||
static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
|
||||
{
|
||||
struct gfs2_jdesc *jd;
|
||||
|
||||
spin_lock(&sdp->sd_jindex_spin);
|
||||
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
|
||||
if (jd->jd_jid != jid)
|
||||
continue;
|
||||
jd->jd_dirty = 1;
|
||||
break;
|
||||
}
|
||||
spin_unlock(&sdp->sd_jindex_spin);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_glock_cb - Callback used by locking module
|
||||
* @sdp: Pointer to the superblock
|
||||
|
@ -1338,80 +1394,83 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
|
|||
* Returns: 1 if it's ok
|
||||
*/
|
||||
|
||||
static int demote_ok(struct gfs2_glock *gl)
|
||||
static int demote_ok(const struct gfs2_glock *gl)
|
||||
{
|
||||
const struct gfs2_glock_operations *glops = gl->gl_ops;
|
||||
int demote = 1;
|
||||
|
||||
if (test_bit(GLF_STICKY, &gl->gl_flags))
|
||||
demote = 0;
|
||||
else if (glops->go_demote_ok)
|
||||
demote = glops->go_demote_ok(gl);
|
||||
|
||||
return demote;
|
||||
if (gl->gl_state == LM_ST_UNLOCKED)
|
||||
return 0;
|
||||
if (!list_empty(&gl->gl_holders))
|
||||
return 0;
|
||||
if (glops->go_demote_ok)
|
||||
return glops->go_demote_ok(gl);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
|
||||
* @gl: the glock
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
|
||||
spin_lock(&sdp->sd_reclaim_lock);
|
||||
if (list_empty(&gl->gl_reclaim)) {
|
||||
gfs2_glock_hold(gl);
|
||||
list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
|
||||
atomic_inc(&sdp->sd_reclaim_count);
|
||||
spin_unlock(&sdp->sd_reclaim_lock);
|
||||
wake_up(&sdp->sd_reclaim_wq);
|
||||
} else
|
||||
spin_unlock(&sdp->sd_reclaim_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
|
||||
* @sdp: the filesystem
|
||||
*
|
||||
* Called from gfs2_glockd() glock reclaim daemon, or when promoting a
|
||||
* different glock and we notice that there are a lot of glocks in the
|
||||
* reclaim list.
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
|
||||
static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
|
||||
{
|
||||
struct gfs2_glock *gl;
|
||||
int done_callback = 0;
|
||||
int may_demote;
|
||||
int nr_skipped = 0;
|
||||
int got_ref = 0;
|
||||
LIST_HEAD(skipped);
|
||||
|
||||
spin_lock(&sdp->sd_reclaim_lock);
|
||||
if (list_empty(&sdp->sd_reclaim_list)) {
|
||||
spin_unlock(&sdp->sd_reclaim_lock);
|
||||
return;
|
||||
if (nr == 0)
|
||||
goto out;
|
||||
|
||||
if (!(gfp_mask & __GFP_FS))
|
||||
return -1;
|
||||
|
||||
spin_lock(&lru_lock);
|
||||
while(nr && !list_empty(&lru_list)) {
|
||||
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
|
||||
list_del_init(&gl->gl_lru);
|
||||
atomic_dec(&lru_count);
|
||||
|
||||
/* Test for being demotable */
|
||||
if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
|
||||
gfs2_glock_hold(gl);
|
||||
got_ref = 1;
|
||||
spin_unlock(&lru_lock);
|
||||
spin_lock(&gl->gl_spin);
|
||||
may_demote = demote_ok(gl);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
clear_bit(GLF_LOCK, &gl->gl_flags);
|
||||
if (may_demote) {
|
||||
handle_callback(gl, LM_ST_UNLOCKED, 0);
|
||||
nr--;
|
||||
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
|
||||
gfs2_glock_put(gl);
|
||||
}
|
||||
spin_lock(&lru_lock);
|
||||
if (may_demote)
|
||||
continue;
|
||||
}
|
||||
if (list_empty(&gl->gl_lru) &&
|
||||
(atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
|
||||
nr_skipped++;
|
||||
list_add(&gl->gl_lru, &skipped);
|
||||
}
|
||||
if (got_ref) {
|
||||
spin_unlock(&lru_lock);
|
||||
gfs2_glock_put(gl);
|
||||
spin_lock(&lru_lock);
|
||||
got_ref = 0;
|
||||
}
|
||||
}
|
||||
gl = list_entry(sdp->sd_reclaim_list.next,
|
||||
struct gfs2_glock, gl_reclaim);
|
||||
list_del_init(&gl->gl_reclaim);
|
||||
spin_unlock(&sdp->sd_reclaim_lock);
|
||||
|
||||
atomic_dec(&sdp->sd_reclaim_count);
|
||||
atomic_inc(&sdp->sd_reclaimed);
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
if (find_first_holder(gl) == NULL &&
|
||||
gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
|
||||
handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
|
||||
done_callback = 1;
|
||||
}
|
||||
spin_unlock(&gl->gl_spin);
|
||||
if (!done_callback ||
|
||||
queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
|
||||
gfs2_glock_put(gl);
|
||||
list_splice(&skipped, &lru_list);
|
||||
atomic_add(nr_skipped, &lru_count);
|
||||
spin_unlock(&lru_lock);
|
||||
out:
|
||||
return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
|
||||
}
|
||||
|
||||
static struct shrinker glock_shrinker = {
|
||||
.shrink = gfs2_shrink_glock_memory,
|
||||
.seeks = DEFAULT_SEEKS,
|
||||
};
|
||||
|
||||
/**
|
||||
* examine_bucket - Call a function for glock in a hash bucket
|
||||
* @examiner: the function
|
||||
|
@ -1456,26 +1515,6 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
|
|||
return has_entries;
|
||||
}
|
||||
|
||||
/**
|
||||
* scan_glock - look at a glock and see if we can reclaim it
|
||||
* @gl: the glock to look at
|
||||
*
|
||||
*/
|
||||
|
||||
static void scan_glock(struct gfs2_glock *gl)
|
||||
{
|
||||
if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
|
||||
return;
|
||||
if (test_bit(GLF_LOCK, &gl->gl_flags))
|
||||
return;
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
if (find_first_holder(gl) == NULL &&
|
||||
gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
|
||||
gfs2_glock_schedule_for_reclaim(gl);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
}
|
||||
|
||||
/**
|
||||
* clear_glock - look at a glock and see if we can free it from glock cache
|
||||
* @gl: the glock to look at
|
||||
|
@ -1484,23 +1523,16 @@ static void scan_glock(struct gfs2_glock *gl)
|
|||
|
||||
static void clear_glock(struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
int released;
|
||||
|
||||
spin_lock(&sdp->sd_reclaim_lock);
|
||||
if (!list_empty(&gl->gl_reclaim)) {
|
||||
list_del_init(&gl->gl_reclaim);
|
||||
atomic_dec(&sdp->sd_reclaim_count);
|
||||
spin_unlock(&sdp->sd_reclaim_lock);
|
||||
released = gfs2_glock_put(gl);
|
||||
gfs2_assert(sdp, !released);
|
||||
} else {
|
||||
spin_unlock(&sdp->sd_reclaim_lock);
|
||||
spin_lock(&lru_lock);
|
||||
if (!list_empty(&gl->gl_lru)) {
|
||||
list_del_init(&gl->gl_lru);
|
||||
atomic_dec(&lru_count);
|
||||
}
|
||||
spin_unlock(&lru_lock);
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
|
||||
handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
|
||||
handle_callback(gl, LM_ST_UNLOCKED, 0);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
gfs2_glock_hold(gl);
|
||||
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
|
||||
|
@ -1548,6 +1580,20 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
|
|||
}
|
||||
}
|
||||
|
||||
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
|
||||
{
|
||||
struct gfs2_glock *gl = ip->i_gl;
|
||||
int ret;
|
||||
|
||||
ret = gfs2_truncatei_resume(ip);
|
||||
gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
|
||||
|
||||
spin_lock(&gl->gl_spin);
|
||||
clear_bit(GLF_LOCK, &gl->gl_flags);
|
||||
run_queue(gl, 1);
|
||||
spin_unlock(&gl->gl_spin);
|
||||
}
|
||||
|
||||
static const char *state2str(unsigned state)
|
||||
{
|
||||
switch(state) {
|
||||
|
@ -1623,8 +1669,6 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
|
|||
char *p = buf;
|
||||
if (test_bit(GLF_LOCK, gflags))
|
||||
*p++ = 'l';
|
||||
if (test_bit(GLF_STICKY, gflags))
|
||||
*p++ = 's';
|
||||
if (test_bit(GLF_DEMOTE, gflags))
|
||||
*p++ = 'D';
|
||||
if (test_bit(GLF_PENDING_DEMOTE, gflags))
|
||||
|
@ -1743,34 +1787,6 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
|
|||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_scand - Look for cached glocks and inodes to toss from memory
|
||||
* @sdp: Pointer to GFS2 superblock
|
||||
*
|
||||
* One of these daemons runs, finding candidates to add to sd_reclaim_list.
|
||||
* See gfs2_glockd()
|
||||
*/
|
||||
|
||||
static int gfs2_scand(void *data)
|
||||
{
|
||||
unsigned x;
|
||||
unsigned delay;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
|
||||
examine_bucket(scan_glock, NULL, x);
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
delay = scand_secs;
|
||||
if (delay < 1)
|
||||
delay = 1;
|
||||
schedule_timeout_interruptible(delay * HZ);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int __init gfs2_glock_init(void)
|
||||
{
|
||||
|
@ -1784,28 +1800,21 @@ int __init gfs2_glock_init(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
|
||||
if (IS_ERR(scand_process))
|
||||
return PTR_ERR(scand_process);
|
||||
|
||||
glock_workqueue = create_workqueue("glock_workqueue");
|
||||
if (IS_ERR(glock_workqueue)) {
|
||||
kthread_stop(scand_process);
|
||||
if (IS_ERR(glock_workqueue))
|
||||
return PTR_ERR(glock_workqueue);
|
||||
}
|
||||
|
||||
register_shrinker(&glock_shrinker);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void gfs2_glock_exit(void)
|
||||
{
|
||||
unregister_shrinker(&glock_shrinker);
|
||||
destroy_workqueue(glock_workqueue);
|
||||
kthread_stop(scand_process);
|
||||
}
|
||||
|
||||
module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
|
||||
|
||||
static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
|
||||
{
|
||||
struct gfs2_glock *gl;
|
||||
|
|
|
@ -129,9 +129,9 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
|
|||
void gfs2_lvb_unhold(struct gfs2_glock *gl);
|
||||
|
||||
void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
|
||||
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
|
||||
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
|
||||
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
|
||||
void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
|
||||
|
||||
int __init gfs2_glock_init(void);
|
||||
void gfs2_glock_exit(void);
|
||||
|
|
|
@ -201,19 +201,12 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
|
|||
* Returns: 1 if it's ok
|
||||
*/
|
||||
|
||||
static int inode_go_demote_ok(struct gfs2_glock *gl)
|
||||
static int inode_go_demote_ok(const struct gfs2_glock *gl)
|
||||
{
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
int demote = 0;
|
||||
|
||||
if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
|
||||
demote = 1;
|
||||
else if (!sdp->sd_args.ar_localcaching &&
|
||||
time_after_eq(jiffies, gl->gl_stamp +
|
||||
gfs2_tune_get(sdp, gt_demote_secs) * HZ))
|
||||
demote = 1;
|
||||
|
||||
return demote;
|
||||
if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -227,6 +220,7 @@ static int inode_go_demote_ok(struct gfs2_glock *gl)
|
|||
static int inode_go_lock(struct gfs2_holder *gh)
|
||||
{
|
||||
struct gfs2_glock *gl = gh->gh_gl;
|
||||
struct gfs2_sbd *sdp = gl->gl_sbd;
|
||||
struct gfs2_inode *ip = gl->gl_object;
|
||||
int error = 0;
|
||||
|
||||
|
@ -239,10 +233,16 @@ static int inode_go_lock(struct gfs2_holder *gh)
|
|||
return error;
|
||||
}
|
||||
|
||||
if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
|
||||
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
|
||||
(gl->gl_state == LM_ST_EXCLUSIVE) &&
|
||||
(gh->gh_state == LM_ST_EXCLUSIVE))
|
||||
error = gfs2_truncatei_resume(ip);
|
||||
(gh->gh_state == LM_ST_EXCLUSIVE)) {
|
||||
spin_lock(&sdp->sd_trunc_lock);
|
||||
if (list_empty(&ip->i_trunc_list))
|
||||
list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
|
||||
spin_unlock(&sdp->sd_trunc_lock);
|
||||
wake_up(&sdp->sd_quota_wait);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@ -260,10 +260,13 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
|
|||
const struct gfs2_inode *ip = gl->gl_object;
|
||||
if (ip == NULL)
|
||||
return 0;
|
||||
gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
|
||||
gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
|
||||
(unsigned long long)ip->i_no_formal_ino,
|
||||
(unsigned long long)ip->i_no_addr,
|
||||
IF2DT(ip->i_inode.i_mode), ip->i_flags);
|
||||
IF2DT(ip->i_inode.i_mode), ip->i_flags,
|
||||
(unsigned int)ip->i_diskflags,
|
||||
(unsigned long long)ip->i_inode.i_size,
|
||||
(unsigned long long)ip->i_disksize);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -274,7 +277,7 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
|
|||
* Returns: 1 if it's ok
|
||||
*/
|
||||
|
||||
static int rgrp_go_demote_ok(struct gfs2_glock *gl)
|
||||
static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
|
||||
{
|
||||
return !gl->gl_aspace->i_mapping->nrpages;
|
||||
}
|
||||
|
@ -318,7 +321,9 @@ static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
|
|||
const struct gfs2_rgrpd *rgd = gl->gl_object;
|
||||
if (rgd == NULL)
|
||||
return 0;
|
||||
gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
|
||||
gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n",
|
||||
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
|
||||
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -373,6 +378,18 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* trans_go_demote_ok
|
||||
* @gl: the glock
|
||||
*
|
||||
* Always returns 0
|
||||
*/
|
||||
|
||||
static int trans_go_demote_ok(const struct gfs2_glock *gl)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
|
||||
* @gl: the glock
|
||||
|
@ -380,7 +397,7 @@ static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
|
|||
* Returns: 1 if it's ok
|
||||
*/
|
||||
|
||||
static int quota_go_demote_ok(struct gfs2_glock *gl)
|
||||
static int quota_go_demote_ok(const struct gfs2_glock *gl)
|
||||
{
|
||||
return !atomic_read(&gl->gl_lvb_count);
|
||||
}
|
||||
|
@ -414,6 +431,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
|
|||
const struct gfs2_glock_operations gfs2_trans_glops = {
|
||||
.go_xmote_th = trans_go_sync,
|
||||
.go_xmote_bh = trans_go_xmote_bh,
|
||||
.go_demote_ok = trans_go_demote_ok,
|
||||
.go_type = LM_TYPE_NONDISK,
|
||||
};
|
||||
|
||||
|
|
|
@ -68,12 +68,6 @@ struct gfs2_bitmap {
|
|||
u32 bi_len;
|
||||
};
|
||||
|
||||
struct gfs2_rgrp_host {
|
||||
u32 rg_free;
|
||||
u32 rg_dinodes;
|
||||
u64 rg_igeneration;
|
||||
};
|
||||
|
||||
struct gfs2_rgrpd {
|
||||
struct list_head rd_list; /* Link with superblock */
|
||||
struct list_head rd_list_mru;
|
||||
|
@ -83,14 +77,16 @@ struct gfs2_rgrpd {
|
|||
u32 rd_length; /* length of rgrp header in fs blocks */
|
||||
u32 rd_data; /* num of data blocks in rgrp */
|
||||
u32 rd_bitbytes; /* number of bytes in data bitmaps */
|
||||
struct gfs2_rgrp_host rd_rg;
|
||||
struct gfs2_bitmap *rd_bits;
|
||||
unsigned int rd_bh_count;
|
||||
struct mutex rd_mutex;
|
||||
u32 rd_free;
|
||||
u32 rd_free_clone;
|
||||
u32 rd_dinodes;
|
||||
u64 rd_igeneration;
|
||||
struct gfs2_bitmap *rd_bits;
|
||||
struct mutex rd_mutex;
|
||||
struct gfs2_log_element rd_le;
|
||||
u32 rd_last_alloc;
|
||||
struct gfs2_sbd *rd_sbd;
|
||||
unsigned int rd_bh_count;
|
||||
u32 rd_last_alloc;
|
||||
unsigned char rd_flags;
|
||||
#define GFS2_RDF_CHECK 0x01 /* Need to check for unlinked inodes */
|
||||
#define GFS2_RDF_NOALLOC 0x02 /* rg prohibits allocation */
|
||||
|
@ -129,7 +125,7 @@ struct gfs2_glock_operations {
|
|||
void (*go_xmote_th) (struct gfs2_glock *gl);
|
||||
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
|
||||
void (*go_inval) (struct gfs2_glock *gl, int flags);
|
||||
int (*go_demote_ok) (struct gfs2_glock *gl);
|
||||
int (*go_demote_ok) (const struct gfs2_glock *gl);
|
||||
int (*go_lock) (struct gfs2_holder *gh);
|
||||
void (*go_unlock) (struct gfs2_holder *gh);
|
||||
int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
|
||||
|
@ -159,7 +155,6 @@ struct gfs2_holder {
|
|||
|
||||
enum {
|
||||
GLF_LOCK = 1,
|
||||
GLF_STICKY = 2,
|
||||
GLF_DEMOTE = 3,
|
||||
GLF_PENDING_DEMOTE = 4,
|
||||
GLF_DEMOTE_IN_PROGRESS = 5,
|
||||
|
@ -194,7 +189,7 @@ struct gfs2_glock {
|
|||
unsigned long gl_tchange;
|
||||
void *gl_object;
|
||||
|
||||
struct list_head gl_reclaim;
|
||||
struct list_head gl_lru;
|
||||
|
||||
struct gfs2_sbd *gl_sbd;
|
||||
|
||||
|
@ -233,29 +228,24 @@ enum {
|
|||
GIF_USER = 4, /* user inode, not metadata addr space */
|
||||
};
|
||||
|
||||
struct gfs2_dinode_host {
|
||||
u64 di_size; /* number of bytes in file */
|
||||
u64 di_generation; /* generation number for NFS */
|
||||
u32 di_flags; /* GFS2_DIF_... */
|
||||
/* These only apply to directories */
|
||||
u32 di_entries; /* The number of entries in the directory */
|
||||
u64 di_eattr; /* extended attribute block number */
|
||||
};
|
||||
|
||||
struct gfs2_inode {
|
||||
struct inode i_inode;
|
||||
u64 i_no_addr;
|
||||
u64 i_no_formal_ino;
|
||||
u64 i_generation;
|
||||
u64 i_eattr;
|
||||
loff_t i_disksize;
|
||||
unsigned long i_flags; /* GIF_... */
|
||||
|
||||
struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
|
||||
|
||||
struct gfs2_glock *i_gl; /* Move into i_gh? */
|
||||
struct gfs2_holder i_iopen_gh;
|
||||
struct gfs2_holder i_gh; /* for prepare/commit_write only */
|
||||
struct gfs2_alloc *i_alloc;
|
||||
u64 i_goal; /* goal block for allocations */
|
||||
struct rw_semaphore i_rw_mutex;
|
||||
struct list_head i_trunc_list;
|
||||
u32 i_entries;
|
||||
u32 i_diskflags;
|
||||
u8 i_height;
|
||||
u8 i_depth;
|
||||
};
|
||||
|
@ -406,13 +396,11 @@ struct gfs2_args {
|
|||
struct gfs2_tune {
|
||||
spinlock_t gt_spin;
|
||||
|
||||
unsigned int gt_demote_secs; /* Cache retention for unheld glock */
|
||||
unsigned int gt_incore_log_blocks;
|
||||
unsigned int gt_log_flush_secs;
|
||||
|
||||
unsigned int gt_recoverd_secs;
|
||||
unsigned int gt_logd_secs;
|
||||
unsigned int gt_quotad_secs;
|
||||
|
||||
unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
|
||||
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
|
||||
|
@ -488,10 +476,6 @@ struct gfs2_sbd {
|
|||
/* Lock Stuff */
|
||||
|
||||
struct lm_lockstruct sd_lockstruct;
|
||||
struct list_head sd_reclaim_list;
|
||||
spinlock_t sd_reclaim_lock;
|
||||
wait_queue_head_t sd_reclaim_wq;
|
||||
atomic_t sd_reclaim_count;
|
||||
struct gfs2_holder sd_live_gh;
|
||||
struct gfs2_glock *sd_rename_gl;
|
||||
struct gfs2_glock *sd_trans_gl;
|
||||
|
@ -519,7 +503,6 @@ struct gfs2_sbd {
|
|||
spinlock_t sd_statfs_spin;
|
||||
struct gfs2_statfs_change_host sd_statfs_master;
|
||||
struct gfs2_statfs_change_host sd_statfs_local;
|
||||
unsigned long sd_statfs_sync_time;
|
||||
|
||||
/* Resource group stuff */
|
||||
|
||||
|
@ -552,8 +535,6 @@ struct gfs2_sbd {
|
|||
struct task_struct *sd_recoverd_process;
|
||||
struct task_struct *sd_logd_process;
|
||||
struct task_struct *sd_quotad_process;
|
||||
struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX];
|
||||
unsigned int sd_glockd_num;
|
||||
|
||||
/* Quota stuff */
|
||||
|
||||
|
@ -561,13 +542,15 @@ struct gfs2_sbd {
|
|||
atomic_t sd_quota_count;
|
||||
spinlock_t sd_quota_spin;
|
||||
struct mutex sd_quota_mutex;
|
||||
wait_queue_head_t sd_quota_wait;
|
||||
struct list_head sd_trunc_list;
|
||||
spinlock_t sd_trunc_lock;
|
||||
|
||||
unsigned int sd_quota_slots;
|
||||
unsigned int sd_quota_chunks;
|
||||
unsigned char **sd_quota_bitmap;
|
||||
|
||||
u64 sd_quota_sync_gen;
|
||||
unsigned long sd_quota_sync_time;
|
||||
|
||||
/* Log stuff */
|
||||
|
||||
|
@ -624,10 +607,6 @@ struct gfs2_sbd {
|
|||
struct mutex sd_freeze_lock;
|
||||
unsigned int sd_freeze_count;
|
||||
|
||||
/* Counters */
|
||||
|
||||
atomic_t sd_reclaimed;
|
||||
|
||||
char sd_fsname[GFS2_FSNAME_LEN];
|
||||
char sd_table_name[GFS2_FSNAME_LEN];
|
||||
char sd_proto_name[GFS2_FSNAME_LEN];
|
||||
|
|
|
@@ -32,7 +32,6 @@
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"

@@ -248,7 +247,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

@@ -274,8 +272,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 * to do that.
 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, di->di_size);
	ip->i_disksize = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, ip->i_disksize);
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);

@@ -287,9 +285,9 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	di->di_generation = be64_to_cpu(str->di_generation);
	ip->i_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	ip->i_diskflags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))

@@ -300,9 +298,9 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	di->di_entries = be32_to_cpu(str->di_entries);
	ip->i_entries = be32_to_cpu(str->di_entries);

	di->di_eattr = be64_to_cpu(str->di_eattr);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

@@ -388,7 +386,6 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip)
	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);

@@ -690,7 +687,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		return error;
	}

	if (dip->i_di.di_entries == (u32)-1)
	if (dip->i_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

@@ -790,11 +787,11 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
		di->di_flags |= cpu_to_be32(dip->i_diskflags &
					    GFS2_DIF_INHERIT_JDATA);
	}

@@ -1068,7 +1065,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
	if (ip->i_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;

@@ -1168,7 +1165,7 @@ int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
		return error;
	}

	if (!ip->i_di.di_size) {
	if (!ip->i_disksize) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;

@@ -1178,7 +1175,7 @@ int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	x = ip->i_disksize + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_NOFS);
		if (!*buf) {

@@ -1242,7 +1239,6 @@ int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct gfs2_dinode_host *di = &ip->i_di;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);

@@ -1256,7 +1252,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(di->di_size);
	str->di_size = cpu_to_be64(ip->i_disksize);
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);

@@ -1264,17 +1260,17 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(di->di_generation);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(di->di_flags);
	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(di->di_entries);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(di->di_eattr);
	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);

@@ -1282,22 +1278,21 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	const struct gfs2_dinode_host *di = &ip->i_di;

	printk(KERN_INFO " no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO " no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO " i_disksize = %llu\n",
	       (unsigned long long)ip->i_disksize);
	printk(KERN_INFO " blocks = %llu\n",
	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
	printk(KERN_INFO " i_goal = %llu\n",
	       (unsigned long long)ip->i_goal);
	printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
	printk(KERN_INFO " i_diskflags = 0x%.8X\n", ip->i_diskflags);
	printk(KERN_INFO " i_height = %u\n", ip->i_height);
	printk(KERN_INFO " i_depth = %u\n", ip->i_depth);
	printk(KERN_INFO " di_entries = %u\n", di->di_entries);
	printk(KERN_INFO " di_eattr = %llu\n",
	       (unsigned long long)di->di_eattr);
	printk(KERN_INFO " i_entries = %u\n", ip->i_entries);
	printk(KERN_INFO " i_eattr = %llu\n",
	       (unsigned long long)ip->i_eattr);
}

@@ -10,6 +10,7 @@
#ifndef __INODE_DOT_H__
#define __INODE_DOT_H__

#include <linux/fs.h>
#include "util.h"

static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)

@@ -19,7 +20,7 @@ static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)

static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
{
	return ip->i_di.di_flags & GFS2_DIF_JDATA;
	return ip->i_diskflags & GFS2_DIF_JDATA;
}

static inline int gfs2_is_writeback(const struct gfs2_inode *ip)

@@ -97,5 +98,15 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
void gfs2_dinode_print(const struct gfs2_inode *ip);

extern const struct inode_operations gfs2_file_iops;
extern const struct inode_operations gfs2_dir_iops;
extern const struct inode_operations gfs2_symlink_iops;
extern const struct file_operations gfs2_file_fops;
extern const struct file_operations gfs2_dir_fops;
extern const struct file_operations gfs2_file_fops_nolock;
extern const struct file_operations gfs2_dir_fops_nolock;

extern void gfs2_set_inode_flags(struct inode *inode);

#endif /* __INODE_DOT_H__ */

@@ -194,17 +194,25 @@ static void gdlm_unmount(void *lockspace)
static void gdlm_recovery_done(void *lockspace, unsigned int jid,
			       unsigned int message)
{
	char env_jid[20];
	char env_status[20];
	char *envp[] = { env_jid, env_status, NULL };
	struct gdlm_ls *ls = lockspace;
	ls->recover_jid_done = jid;
	ls->recover_jid_status = message;
	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
	sprintf(env_jid, "JID=%d", jid);
	sprintf(env_status, "RECOVERY=%s",
		message == LM_RD_SUCCESS ? "Done" : "Failed");
	kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp);
}

static void gdlm_others_may_mount(void *lockspace)
{
	char *message = "FIRSTMOUNT=Done";
	char *envp[] = { message, NULL };
	struct gdlm_ls *ls = lockspace;
	ls->first_done = 1;
	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
	kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp);
}

/* Userspace gets the offline uevent, blocks new gfs locks on

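The two lock_dlm hunks above replace bare KOBJ_CHANGE notifications with kobject_uevent_env() calls that attach JID=, RECOVERY= and FIRSTMOUNT= variables to the uevent. A minimal sketch of how a consumer could pick those variables out of a "KEY=value" string array; the helper name and the plain array are assumptions, real listeners would read the strings from a netlink uevent message, and this is not part of the patch:

	#include <stdio.h>
	#include <string.h>

	/* Illustrative only: scan a NULL-terminated "KEY=value" array, as filled
	 * in by gdlm_recovery_done() above, and report the recovery outcome. */
	static void report_recovery(char *const envp[])
	{
		const char *jid = NULL, *status = NULL;
		int i;

		for (i = 0; envp[i] != NULL; i++) {
			if (strncmp(envp[i], "JID=", 4) == 0)
				jid = envp[i] + 4;
			else if (strncmp(envp[i], "RECOVERY=", 9) == 0)
				status = envp[i] + 9;
		}
		if (jid && status)
			printf("journal %s recovery: %s\n", jid, status);
	}
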
@@ -195,9 +195,23 @@ void gdlm_kobject_release(struct gdlm_ls *ls)
	kobject_put(&ls->kobj);
}

static int gdlm_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
	add_uevent_var(env, "LOCKTABLE=%s:%s", ls->clustername, ls->fsname);
	add_uevent_var(env, "LOCKPROTO=lock_dlm");
	return 0;
}

static struct kset_uevent_ops gdlm_uevent_ops = {
	.uevent = gdlm_uevent,
};


int gdlm_sysfs_init(void)
{
	gdlm_kset = kset_create_and_add("lock_dlm", NULL, kernel_kobj);
	gdlm_kset = kset_create_and_add("lock_dlm", &gdlm_uevent_ops, kernel_kobj);
	if (!gdlm_kset) {
		printk(KERN_WARNING "%s: can not create kset\n", __func__);
		return -ENOMEM;

@@ -19,7 +19,7 @@

#include "gfs2.h"
#include "incore.h"
#include "ops_fstype.h"
#include "super.h"
#include "sys.h"
#include "util.h"
#include "glock.h"

@@ -30,6 +30,7 @@ static void gfs2_init_inode_once(void *foo)

	inode_init_once(&ip->i_inode);
	init_rwsem(&ip->i_rw_mutex);
	INIT_LIST_HEAD(&ip->i_trunc_list);
	ip->i_alloc = NULL;
}

@@ -42,7 +43,7 @@ static void gfs2_init_glock_once(void *foo)
	INIT_LIST_HEAD(&gl->gl_holders);
	gl->gl_lvb = NULL;
	atomic_set(&gl->gl_lvb_count, 0);
	INIT_LIST_HEAD(&gl->gl_reclaim);
	INIT_LIST_HEAD(&gl->gl_lru);
	INIT_LIST_HEAD(&gl->gl_ail_list);
	atomic_set(&gl->gl_ail_count, 0);
}

@@ -93,6 +94,12 @@ static int __init init_gfs2_fs(void)
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

@@ -112,6 +119,9 @@ static int __init init_gfs2_fs(void)
fail:
	gfs2_glock_exit();

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

@@ -140,6 +150,7 @@ static void __exit exit_gfs2_fs(void)
	unregister_filesystem(&gfs2_fs_type);
	unregister_filesystem(&gfs2meta_fs_type);

	kmem_cache_destroy(gfs2_quotad_cachep);
	kmem_cache_destroy(gfs2_rgrpd_cachep);
	kmem_cache_destroy(gfs2_bufdata_cachep);
	kmem_cache_destroy(gfs2_inode_cachep);

@ -32,7 +32,6 @@ enum {
|
|||
Opt_debug,
|
||||
Opt_nodebug,
|
||||
Opt_upgrade,
|
||||
Opt_num_glockd,
|
||||
Opt_acl,
|
||||
Opt_noacl,
|
||||
Opt_quota_off,
|
||||
|
@ -57,7 +56,6 @@ static const match_table_t tokens = {
|
|||
{Opt_debug, "debug"},
|
||||
{Opt_nodebug, "nodebug"},
|
||||
{Opt_upgrade, "upgrade"},
|
||||
{Opt_num_glockd, "num_glockd=%d"},
|
||||
{Opt_acl, "acl"},
|
||||
{Opt_noacl, "noacl"},
|
||||
{Opt_quota_off, "quota=off"},
|
||||
|
@ -87,16 +85,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
|
|||
int error = 0;
|
||||
|
||||
if (!remount) {
|
||||
/* If someone preloaded options, use those instead */
|
||||
spin_lock(&gfs2_sys_margs_lock);
|
||||
if (gfs2_sys_margs) {
|
||||
data = gfs2_sys_margs;
|
||||
gfs2_sys_margs = NULL;
|
||||
}
|
||||
spin_unlock(&gfs2_sys_margs_lock);
|
||||
|
||||
/* Set some defaults */
|
||||
args->ar_num_glockd = GFS2_GLOCKD_DEFAULT;
|
||||
args->ar_quota = GFS2_QUOTA_DEFAULT;
|
||||
args->ar_data = GFS2_DATA_DEFAULT;
|
||||
}
|
||||
|
@ -105,7 +94,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
|
|||
process them */
|
||||
|
||||
for (options = data; (o = strsep(&options, ",")); ) {
|
||||
int token, option;
|
||||
int token;
|
||||
substring_t tmp[MAX_OPT_ARGS];
|
||||
|
||||
if (!*o)
|
||||
|
@ -196,22 +185,6 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
|
|||
goto cant_remount;
|
||||
args->ar_upgrade = 1;
|
||||
break;
|
||||
case Opt_num_glockd:
|
||||
if ((error = match_int(&tmp[0], &option))) {
|
||||
fs_info(sdp, "problem getting num_glockd\n");
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
if (remount && option != args->ar_num_glockd)
|
||||
goto cant_remount;
|
||||
if (!option || option > GFS2_GLOCKD_MAX) {
|
||||
fs_info(sdp, "0 < num_glockd <= %u (not %u)\n",
|
||||
GFS2_GLOCKD_MAX, option);
|
||||
error = -EINVAL;
|
||||
goto out_error;
|
||||
}
|
||||
args->ar_num_glockd = option;
|
||||
break;
|
||||
case Opt_acl:
|
||||
args->ar_posix_acl = 1;
|
||||
sdp->sd_vfs->s_flags |= MS_POSIXACL;
|
||||
|
|
|
@ -210,25 +210,23 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
|
|||
{
|
||||
struct inode *inode = page->mapping->host;
|
||||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
int error;
|
||||
int ret;
|
||||
int done_trans = 0;
|
||||
|
||||
error = gfs2_writepage_common(page, wbc);
|
||||
if (error <= 0)
|
||||
return error;
|
||||
|
||||
if (PageChecked(page)) {
|
||||
if (wbc->sync_mode != WB_SYNC_ALL)
|
||||
goto out_ignore;
|
||||
error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
|
||||
if (error)
|
||||
ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
|
||||
if (ret)
|
||||
goto out_ignore;
|
||||
done_trans = 1;
|
||||
}
|
||||
error = __gfs2_jdata_writepage(page, wbc);
|
||||
ret = gfs2_writepage_common(page, wbc);
|
||||
if (ret > 0)
|
||||
ret = __gfs2_jdata_writepage(page, wbc);
|
||||
if (done_trans)
|
||||
gfs2_trans_end(sdp);
|
||||
return error;
|
||||
return ret;
|
||||
|
||||
out_ignore:
|
||||
redirty_page_for_writepage(wbc, page);
|
||||
|
@ -453,8 +451,8 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
|
|||
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
|
||||
ip->i_di.di_size);
|
||||
memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
|
||||
ip->i_disksize);
|
||||
memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
|
||||
kunmap_atomic(kaddr, KM_USER0);
|
||||
flush_dcache_page(page);
|
||||
brelse(dibh);
|
||||
|
@ -627,7 +625,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
|
|||
{
|
||||
struct gfs2_inode *ip = GFS2_I(mapping->host);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
|
||||
unsigned int data_blocks, ind_blocks, rblocks;
|
||||
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
|
||||
int alloc_required;
|
||||
int error = 0;
|
||||
struct gfs2_alloc *al;
|
||||
|
@ -641,11 +639,13 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
|
|||
if (unlikely(error))
|
||||
goto out_uninit;
|
||||
|
||||
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
|
||||
error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
if (alloc_required || gfs2_is_jdata(ip))
|
||||
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
|
||||
|
||||
if (alloc_required) {
|
||||
al = gfs2_alloc_get(ip);
|
||||
if (!al) {
|
||||
|
@ -782,7 +782,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
|
|||
|
||||
if (inode->i_size < to) {
|
||||
i_size_write(inode, to);
|
||||
ip->i_di.di_size = inode->i_size;
|
||||
ip->i_disksize = inode->i_size;
|
||||
di->di_size = cpu_to_be64(inode->i_size);
|
||||
mark_inode_dirty(inode);
|
||||
}
|
||||
|
@ -847,9 +847,9 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
|
|||
|
||||
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
|
||||
|
||||
if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
|
||||
if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) {
|
||||
di = (struct gfs2_dinode *)dibh->b_data;
|
||||
ip->i_di.di_size = inode->i_size;
|
||||
ip->i_disksize = inode->i_size;
|
||||
di->di_size = cpu_to_be64(inode->i_size);
|
||||
mark_inode_dirty(inode);
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#include "incore.h"
|
||||
#include "dir.h"
|
||||
#include "glock.h"
|
||||
#include "ops_dentry.h"
|
||||
#include "super.h"
|
||||
#include "util.h"
|
||||
#include "inode.h"
|
||||
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_DENTRY_DOT_H__
|
||||
#define __OPS_DENTRY_DOT_H__
|
||||
|
||||
#include <linux/dcache.h>
|
||||
|
||||
extern struct dentry_operations gfs2_dops;
|
||||
|
||||
#endif /* __OPS_DENTRY_DOT_H__ */
|
|
@ -22,8 +22,7 @@
|
|||
#include "glock.h"
|
||||
#include "glops.h"
|
||||
#include "inode.h"
|
||||
#include "ops_dentry.h"
|
||||
#include "ops_fstype.h"
|
||||
#include "super.h"
|
||||
#include "rgrp.h"
|
||||
#include "util.h"
|
||||
|
||||
|
@ -214,7 +213,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
|
|||
}
|
||||
|
||||
error = -EIO;
|
||||
if (GFS2_I(inode)->i_di.di_flags & GFS2_DIF_SYSTEM) {
|
||||
if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) {
|
||||
iput(inode);
|
||||
goto fail;
|
||||
}
|
||||
|
|
|
@ -39,7 +39,6 @@
|
|||
#include "util.h"
|
||||
#include "eaops.h"
|
||||
#include "ops_address.h"
|
||||
#include "ops_inode.h"
|
||||
|
||||
/**
|
||||
* gfs2_llseek - seek to a location in a file
|
||||
|
@ -158,8 +157,8 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
|
|||
if (error)
|
||||
return error;
|
||||
|
||||
fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
|
||||
if (!S_ISDIR(inode->i_mode) && ip->i_di.di_flags & GFS2_DIF_JDATA)
|
||||
fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
|
||||
if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
|
||||
fsflags |= FS_JOURNAL_DATA_FL;
|
||||
if (put_user(fsflags, ptr))
|
||||
error = -EFAULT;
|
||||
|
@ -172,17 +171,16 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
|
|||
void gfs2_set_inode_flags(struct inode *inode)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_dinode_host *di = &ip->i_di;
|
||||
unsigned int flags = inode->i_flags;
|
||||
|
||||
flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
|
||||
if (di->di_flags & GFS2_DIF_IMMUTABLE)
|
||||
if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
|
||||
flags |= S_IMMUTABLE;
|
||||
if (di->di_flags & GFS2_DIF_APPENDONLY)
|
||||
if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
|
||||
flags |= S_APPEND;
|
||||
if (di->di_flags & GFS2_DIF_NOATIME)
|
||||
if (ip->i_diskflags & GFS2_DIF_NOATIME)
|
||||
flags |= S_NOATIME;
|
||||
if (di->di_flags & GFS2_DIF_SYNC)
|
||||
if (ip->i_diskflags & GFS2_DIF_SYNC)
|
||||
flags |= S_SYNC;
|
||||
inode->i_flags = flags;
|
||||
}
|
||||
|
@ -221,7 +219,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
|
|||
if (error)
|
||||
goto out_drop_write;
|
||||
|
||||
flags = ip->i_di.di_flags;
|
||||
flags = ip->i_diskflags;
|
||||
new_flags = (flags & ~mask) | (reqflags & mask);
|
||||
if ((new_flags ^ flags) == 0)
|
||||
goto out;
|
||||
|
@ -260,7 +258,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
|
|||
if (error)
|
||||
goto out_trans_end;
|
||||
gfs2_trans_add_bh(ip->i_gl, bh, 1);
|
||||
ip->i_di.di_flags = new_flags;
|
||||
ip->i_diskflags = new_flags;
|
||||
gfs2_dinode_out(ip, bh->b_data);
|
||||
brelse(bh);
|
||||
gfs2_set_inode_flags(inode);
|
||||
|
@ -357,7 +355,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
|
|||
goto out;
|
||||
|
||||
set_bit(GIF_SW_PAGED, &ip->i_flags);
|
||||
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
|
||||
ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
|
||||
if (ret || !alloc_required)
|
||||
goto out_unlock;
|
||||
|
@ -369,6 +366,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
|
|||
ret = gfs2_quota_lock_check(ip);
|
||||
if (ret)
|
||||
goto out_alloc_put;
|
||||
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
|
||||
al->al_requested = data_blocks + ind_blocks;
|
||||
ret = gfs2_inplace_reserve(ip);
|
||||
if (ret)
|
||||
|
@ -479,7 +477,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
|
|||
goto fail;
|
||||
|
||||
if (!(file->f_flags & O_LARGEFILE) &&
|
||||
ip->i_di.di_size > MAX_NON_LFS) {
|
||||
ip->i_disksize > MAX_NON_LFS) {
|
||||
error = -EOVERFLOW;
|
||||
goto fail_gunlock;
|
||||
}
|
||||
|
|
|
@ -22,20 +22,18 @@
|
|||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
#include "bmap.h"
|
||||
#include "daemon.h"
|
||||
#include "glock.h"
|
||||
#include "glops.h"
|
||||
#include "inode.h"
|
||||
#include "mount.h"
|
||||
#include "ops_fstype.h"
|
||||
#include "ops_dentry.h"
|
||||
#include "ops_super.h"
|
||||
#include "recovery.h"
|
||||
#include "rgrp.h"
|
||||
#include "super.h"
|
||||
#include "sys.h"
|
||||
#include "util.h"
|
||||
#include "log.h"
|
||||
#include "quota.h"
|
||||
#include "dir.h"
|
||||
|
||||
#define DO 0
|
||||
#define UNDO 1
|
||||
|
@ -58,12 +56,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
|
|||
{
|
||||
spin_lock_init(&gt->gt_spin);
|
||||
|
||||
gt->gt_demote_secs = 300;
|
||||
gt->gt_incore_log_blocks = 1024;
|
||||
gt->gt_log_flush_secs = 60;
|
||||
gt->gt_recoverd_secs = 60;
|
||||
gt->gt_logd_secs = 1;
|
||||
gt->gt_quotad_secs = 5;
|
||||
gt->gt_quota_simul_sync = 64;
|
||||
gt->gt_quota_warn_period = 10;
|
||||
gt->gt_quota_scale_num = 1;
|
||||
|
@ -91,10 +87,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
|
|||
|
||||
gfs2_tune_init(&sdp->sd_tune);
|
||||
|
||||
INIT_LIST_HEAD(&sdp->sd_reclaim_list);
|
||||
spin_lock_init(&sdp->sd_reclaim_lock);
|
||||
init_waitqueue_head(&sdp->sd_reclaim_wq);
|
||||
|
||||
mutex_init(&sdp->sd_inum_mutex);
|
||||
spin_lock_init(&sdp->sd_statfs_spin);
|
||||
|
||||
|
@ -110,6 +102,9 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
|
|||
INIT_LIST_HEAD(&sdp->sd_quota_list);
|
||||
spin_lock_init(&sdp->sd_quota_spin);
|
||||
mutex_init(&sdp->sd_quota_mutex);
|
||||
init_waitqueue_head(&sdp->sd_quota_wait);
|
||||
INIT_LIST_HEAD(&sdp->sd_trunc_list);
|
||||
spin_lock_init(&sdp->sd_trunc_lock);
|
||||
|
||||
spin_lock_init(&sdp->sd_log_lock);
|
||||
|
||||
|
@ -443,24 +438,11 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
|
|||
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
|
||||
int undo)
|
||||
{
|
||||
struct task_struct *p;
|
||||
int error = 0;
|
||||
|
||||
if (undo)
|
||||
goto fail_trans;
|
||||
|
||||
for (sdp->sd_glockd_num = 0;
|
||||
sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd;
|
||||
sdp->sd_glockd_num++) {
|
||||
p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't start glockd thread: %d\n", error);
|
||||
goto fail;
|
||||
}
|
||||
sdp->sd_glockd_process[sdp->sd_glockd_num] = p;
|
||||
}
|
||||
|
||||
error = gfs2_glock_nq_num(sdp,
|
||||
GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
|
||||
LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
|
||||
|
@ -493,7 +475,6 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
|
|||
fs_err(sdp, "can't create transaction glock: %d\n", error);
|
||||
goto fail_rename;
|
||||
}
|
||||
set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -506,9 +487,6 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
|
|||
fail_mount:
|
||||
gfs2_glock_dq_uninit(mount_gh);
|
||||
fail:
|
||||
while (sdp->sd_glockd_num--)
|
||||
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -620,7 +598,7 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
|
|||
|
||||
prev_db = 0;
|
||||
|
||||
for (lb = 0; lb < ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; lb++) {
|
||||
for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) {
|
||||
bh.b_state = 0;
|
||||
bh.b_blocknr = 0;
|
||||
bh.b_size = 1 << ip->i_inode.i_blkbits;
|
||||
|
@ -661,6 +639,72 @@ static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
|
|||
sdp->sd_lockstruct.ls_lockspace);
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_jindex_hold - Grab a lock on the jindex
|
||||
* @sdp: The GFS2 superblock
|
||||
* @ji_gh: the holder for the jindex glock
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
|
||||
{
|
||||
struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
|
||||
struct qstr name;
|
||||
char buf[20];
|
||||
struct gfs2_jdesc *jd;
|
||||
int error;
|
||||
|
||||
name.name = buf;
|
||||
|
||||
mutex_lock(&sdp->sd_jindex_mutex);
|
||||
|
||||
for (;;) {
|
||||
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
|
||||
if (error)
|
||||
break;
|
||||
|
||||
name.len = sprintf(buf, "journal%u", sdp->sd_journals);
|
||||
name.hash = gfs2_disk_hash(name.name, name.len);
|
||||
|
||||
error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
|
||||
if (error == -ENOENT) {
|
||||
error = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
gfs2_glock_dq_uninit(ji_gh);
|
||||
|
||||
if (error)
|
||||
break;
|
||||
|
||||
error = -ENOMEM;
|
||||
jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
|
||||
if (!jd)
|
||||
break;
|
||||
|
||||
INIT_LIST_HEAD(&jd->extent_list);
|
||||
jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
|
||||
if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
|
||||
if (!jd->jd_inode)
|
||||
error = -ENOENT;
|
||||
else
|
||||
error = PTR_ERR(jd->jd_inode);
|
||||
kfree(jd);
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock(&sdp->sd_jindex_spin);
|
||||
jd->jd_jid = sdp->sd_journals++;
|
||||
list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
|
||||
spin_unlock(&sdp->sd_jindex_spin);
|
||||
}
|
||||
|
||||
mutex_unlock(&sdp->sd_jindex_mutex);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
static int init_journal(struct gfs2_sbd *sdp, int undo)
|
||||
{
|
||||
struct inode *master = sdp->sd_master_dir->d_inode;
|
||||
|
@ -681,7 +725,6 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
|
|||
return PTR_ERR(sdp->sd_jindex);
|
||||
}
|
||||
ip = GFS2_I(sdp->sd_jindex);
|
||||
set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
|
||||
|
||||
/* Load in the journal index special file */
|
||||
|
||||
|
@ -832,7 +875,6 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
|
|||
goto fail_statfs;
|
||||
}
|
||||
ip = GFS2_I(sdp->sd_rindex);
|
||||
set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
|
||||
sdp->sd_rindex_uptodate = 0;
|
||||
|
||||
/* Read in the quota inode */
|
||||
|
@ -973,9 +1015,6 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
|
|||
}
|
||||
sdp->sd_logd_process = p;
|
||||
|
||||
sdp->sd_statfs_sync_time = jiffies;
|
||||
sdp->sd_quota_sync_time = jiffies;
|
||||
|
||||
p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
|
||||
error = IS_ERR(p);
|
||||
if (error) {
|
||||
|
@@ -1224,17 +1263,21 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
static void gfs2_kill_sb(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	if (sdp) {
		gfs2_meta_syncfs(sdp);
		dput(sdp->sd_root_dir);
		dput(sdp->sd_master_dir);
		sdp->sd_root_dir = NULL;
		sdp->sd_master_dir = NULL;

	if (sdp == NULL) {
		kill_block_super(sb);
		return;
	}

	gfs2_meta_syncfs(sdp);
	dput(sdp->sd_root_dir);
	dput(sdp->sd_master_dir);
	sdp->sd_root_dir = NULL;
	sdp->sd_master_dir = NULL;
	shrink_dcache_sb(sb);
	kill_block_super(sb);
	if (sdp)
		gfs2_delete_debugfs_file(sdp);
	gfs2_delete_debugfs_file(sdp);
	kfree(sdp);
}

struct file_system_type gfs2_fs_type = {

@ -1,19 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_FSTYPE_DOT_H__
|
||||
#define __OPS_FSTYPE_DOT_H__
|
||||
|
||||
#include <linux/fs.h>
|
||||
|
||||
extern struct file_system_type gfs2_fs_type;
|
||||
extern struct file_system_type gfs2meta_fs_type;
|
||||
extern const struct export_operations gfs2_export_ops;
|
||||
|
||||
#endif /* __OPS_FSTYPE_DOT_H__ */
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/lm_interface.h>
|
||||
#include <linux/fiemap.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
|
@ -31,12 +32,11 @@
|
|||
#include "glock.h"
|
||||
#include "inode.h"
|
||||
#include "meta_io.h"
|
||||
#include "ops_dentry.h"
|
||||
#include "ops_inode.h"
|
||||
#include "quota.h"
|
||||
#include "rgrp.h"
|
||||
#include "trans.h"
|
||||
#include "util.h"
|
||||
#include "super.h"
|
||||
|
||||
/**
|
||||
* gfs2_create - Create a file
|
||||
|
@ -185,7 +185,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
|
|||
if (!dip->i_inode.i_nlink)
|
||||
goto out_gunlock;
|
||||
error = -EFBIG;
|
||||
if (dip->i_di.di_entries == (u32)-1)
|
||||
if (dip->i_entries == (u32)-1)
|
||||
goto out_gunlock;
|
||||
error = -EPERM;
|
||||
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
|
||||
|
@ -371,7 +371,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
|
|||
|
||||
ip = ghs[1].gh_gl->gl_object;
|
||||
|
||||
ip->i_di.di_size = size;
|
||||
ip->i_disksize = size;
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
|
||||
|
@ -425,9 +425,9 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
|
|||
ip = ghs[1].gh_gl->gl_object;
|
||||
|
||||
ip->i_inode.i_nlink = 2;
|
||||
ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
|
||||
ip->i_di.di_flags |= GFS2_DIF_JDATA;
|
||||
ip->i_di.di_entries = 2;
|
||||
ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
|
||||
ip->i_diskflags |= GFS2_DIF_JDATA;
|
||||
ip->i_entries = 2;
|
||||
|
||||
error = gfs2_meta_inode_buffer(ip, &dibh);
|
||||
|
||||
|
@ -517,13 +517,13 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
|
|||
if (error)
|
||||
goto out_gunlock;
|
||||
|
||||
if (ip->i_di.di_entries < 2) {
|
||||
if (ip->i_entries < 2) {
|
||||
if (gfs2_consist_inode(ip))
|
||||
gfs2_dinode_print(ip);
|
||||
error = -EIO;
|
||||
goto out_gunlock;
|
||||
}
|
||||
if (ip->i_di.di_entries > 2) {
|
||||
if (ip->i_entries > 2) {
|
||||
error = -ENOTEMPTY;
|
||||
goto out_gunlock;
|
||||
}
|
||||
|
@ -726,13 +726,13 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
|
|||
goto out_gunlock;
|
||||
|
||||
if (S_ISDIR(nip->i_inode.i_mode)) {
|
||||
if (nip->i_di.di_entries < 2) {
|
||||
if (nip->i_entries < 2) {
|
||||
if (gfs2_consist_inode(nip))
|
||||
gfs2_dinode_print(nip);
|
||||
error = -EIO;
|
||||
goto out_gunlock;
|
||||
}
|
||||
if (nip->i_di.di_entries > 2) {
|
||||
if (nip->i_entries > 2) {
|
||||
error = -ENOTEMPTY;
|
||||
goto out_gunlock;
|
||||
}
|
||||
|
@ -758,7 +758,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
|
|||
error = -EINVAL;
|
||||
goto out_gunlock;
|
||||
}
|
||||
if (ndip->i_di.di_entries == (u32)-1) {
|
||||
if (ndip->i_entries == (u32)-1) {
|
||||
error = -EFBIG;
|
||||
goto out_gunlock;
|
||||
}
|
||||
|
@ -990,7 +990,7 @@ static int setattr_size(struct inode *inode, struct iattr *attr)
|
|||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
int error;
|
||||
|
||||
if (attr->ia_size != ip->i_di.di_size) {
|
||||
if (attr->ia_size != ip->i_disksize) {
|
||||
error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
|
||||
if (error)
|
||||
return error;
|
||||
|
@ -1001,8 +1001,8 @@ static int setattr_size(struct inode *inode, struct iattr *attr)
|
|||
}
|
||||
|
||||
error = gfs2_truncatei(ip, attr->ia_size);
|
||||
if (error && (inode->i_size != ip->i_di.di_size))
|
||||
i_size_write(inode, ip->i_di.di_size);
|
||||
if (error && (inode->i_size != ip->i_disksize))
|
||||
i_size_write(inode, ip->i_disksize);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@@ -1212,6 +1212,48 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name)
	return gfs2_ea_remove(GFS2_I(dentry->d_inode), &er);
}

static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (ret)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		u64 phys = ip->i_no_addr << inode->i_blkbits;
		u64 size = i_size_read(inode);
		u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED|
			    FIEMAP_EXTENT_DATA_INLINE;
		phys += sizeof(struct gfs2_dinode);
		phys += start;
		if (start + len > size)
			len = size - start;
		if (start < size)
			ret = fiemap_fill_next_extent(fieinfo, start, phys,
						      len, flags);
		if (ret == 1)
			ret = 0;
	} else {
		ret = __generic_block_fiemap(inode, fieinfo, start, len,
					     gfs2_block_map);
	}

	gfs2_glock_dq_uninit(&gh);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

const struct inode_operations gfs2_file_iops = {
	.permission = gfs2_permission,
	.setattr = gfs2_setattr,

@@ -1220,6 +1262,7 @@ const struct inode_operations gfs2_file_iops = {
	.getxattr = gfs2_getxattr,
	.listxattr = gfs2_listxattr,
	.removexattr = gfs2_removexattr,
	.fiemap = gfs2_fiemap,
};

const struct inode_operations gfs2_dir_iops = {

@@ -1239,6 +1282,7 @@ const struct inode_operations gfs2_dir_iops = {
	.getxattr = gfs2_getxattr,
	.listxattr = gfs2_listxattr,
	.removexattr = gfs2_removexattr,
	.fiemap = gfs2_fiemap,
};

const struct inode_operations gfs2_symlink_iops = {

@@ -1251,5 +1295,6 @@ const struct inode_operations gfs2_symlink_iops = {
	.getxattr = gfs2_getxattr,
	.listxattr = gfs2_listxattr,
	.removexattr = gfs2_removexattr,
	.fiemap = gfs2_fiemap,
};

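The new gfs2_fiemap() above reports a single FIEMAP_EXTENT_DATA_INLINE extent for stuffed inodes and falls back to __generic_block_fiemap() otherwise. A rough userspace-side sketch of querying it through the generic FS_IOC_FIEMAP ioctl; this is illustrative only, error handling is trimmed, and the single-extent buffer size is an assumption:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	/* Ask the filesystem for the first extent of a file; on a stuffed GFS2
	 * inode this should come back flagged FIEMAP_EXTENT_DATA_INLINE. */
	int print_first_extent(const char *path)
	{
		char buf[sizeof(struct fiemap) + sizeof(struct fiemap_extent)];
		struct fiemap *fm = (struct fiemap *)buf;
		int fd = open(path, O_RDONLY);

		if (fd < 0)
			return -1;
		memset(buf, 0, sizeof(buf));
		fm->fm_start = 0;
		fm->fm_length = ~0ULL;		/* map the whole file */
		fm->fm_flags = FIEMAP_FLAG_SYNC;
		fm->fm_extent_count = 1;
		if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0 && fm->fm_mapped_extents == 1)
			printf("logical %llu physical %llu flags 0x%x\n",
			       (unsigned long long)fm->fm_extents[0].fe_logical,
			       (unsigned long long)fm->fm_extents[0].fe_physical,
			       fm->fm_extents[0].fe_flags);
		close(fd);
		return 0;
	}
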
@ -1,25 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_INODE_DOT_H__
|
||||
#define __OPS_INODE_DOT_H__
|
||||
|
||||
#include <linux/fs.h>
|
||||
|
||||
extern const struct inode_operations gfs2_file_iops;
|
||||
extern const struct inode_operations gfs2_dir_iops;
|
||||
extern const struct inode_operations gfs2_symlink_iops;
|
||||
extern const struct file_operations gfs2_file_fops;
|
||||
extern const struct file_operations gfs2_dir_fops;
|
||||
extern const struct file_operations gfs2_file_fops_nolock;
|
||||
extern const struct file_operations gfs2_dir_fops_nolock;
|
||||
|
||||
extern void gfs2_set_inode_flags(struct inode *inode);
|
||||
|
||||
#endif /* __OPS_INODE_DOT_H__ */
|
|
@ -28,7 +28,6 @@
|
|||
#include "inode.h"
|
||||
#include "log.h"
|
||||
#include "mount.h"
|
||||
#include "ops_super.h"
|
||||
#include "quota.h"
|
||||
#include "recovery.h"
|
||||
#include "rgrp.h"
|
||||
|
@ -143,8 +142,6 @@ static void gfs2_put_super(struct super_block *sb)
|
|||
kthread_stop(sdp->sd_quotad_process);
|
||||
kthread_stop(sdp->sd_logd_process);
|
||||
kthread_stop(sdp->sd_recoverd_process);
|
||||
while (sdp->sd_glockd_num--)
|
||||
kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
|
||||
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
error = gfs2_make_fs_ro(sdp);
|
||||
|
@ -185,7 +182,6 @@ static void gfs2_put_super(struct super_block *sb)
|
|||
|
||||
/* At this point, we're through participating in the lockspace */
|
||||
gfs2_sys_fs_del(sdp);
|
||||
kfree(sdp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -259,6 +255,137 @@ static void gfs2_unlockfs(struct super_block *sb)
|
|||
gfs2_unfreeze_fs(sb->s_fs_info);
|
||||
}
|
||||
|
||||
/**
|
||||
* statfs_fill - fill in the sg for a given RG
|
||||
* @rgd: the RG
|
||||
* @sc: the sc structure
|
||||
*
|
||||
* Returns: 0 on success, -ESTALE if the LVB is invalid
|
||||
*/
|
||||
|
||||
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
|
||||
struct gfs2_statfs_change_host *sc)
|
||||
{
|
||||
gfs2_rgrp_verify(rgd);
|
||||
sc->sc_total += rgd->rd_data;
|
||||
sc->sc_free += rgd->rd_free;
|
||||
sc->sc_dinodes += rgd->rd_dinodes;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_statfs_slow - Stat a filesystem using asynchronous locking
|
||||
* @sdp: the filesystem
|
||||
* @sc: the sc info that will be returned
|
||||
*
|
||||
* Any error (other than a signal) will cause this routine to fall back
|
||||
* to the synchronous version.
|
||||
*
|
||||
* FIXME: This really shouldn't busy wait like this.
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
|
||||
{
|
||||
struct gfs2_holder ri_gh;
|
||||
struct gfs2_rgrpd *rgd_next;
|
||||
struct gfs2_holder *gha, *gh;
|
||||
unsigned int slots = 64;
|
||||
unsigned int x;
|
||||
int done;
|
||||
int error = 0, err;
|
||||
|
||||
memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
|
||||
gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
|
||||
if (!gha)
|
||||
return -ENOMEM;
|
||||
|
||||
error = gfs2_rindex_hold(sdp, &ri_gh);
|
||||
if (error)
|
||||
goto out;
|
||||
|
||||
rgd_next = gfs2_rgrpd_get_first(sdp);
|
||||
|
||||
for (;;) {
|
||||
done = 1;
|
||||
|
||||
for (x = 0; x < slots; x++) {
|
||||
gh = gha + x;
|
||||
|
||||
if (gh->gh_gl && gfs2_glock_poll(gh)) {
|
||||
err = gfs2_glock_wait(gh);
|
||||
if (err) {
|
||||
gfs2_holder_uninit(gh);
|
||||
error = err;
|
||||
} else {
|
||||
if (!error)
|
||||
error = statfs_slow_fill(
|
||||
gh->gh_gl->gl_object, sc);
|
||||
gfs2_glock_dq_uninit(gh);
|
||||
}
|
||||
}
|
||||
|
||||
if (gh->gh_gl)
|
||||
done = 0;
|
||||
else if (rgd_next && !error) {
|
||||
error = gfs2_glock_nq_init(rgd_next->rd_gl,
|
||||
LM_ST_SHARED,
|
||||
GL_ASYNC,
|
||||
gh);
|
||||
rgd_next = gfs2_rgrpd_get_next(rgd_next);
|
||||
done = 0;
|
||||
}
|
||||
|
||||
if (signal_pending(current))
|
||||
error = -ERESTARTSYS;
|
||||
}
|
||||
|
||||
if (done)
|
||||
break;
|
||||
|
||||
yield();
|
||||
}
|
||||
|
||||
gfs2_glock_dq_uninit(&ri_gh);
|
||||
|
||||
out:
|
||||
kfree(gha);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_statfs_i - Do a statfs
|
||||
* @sdp: the filesystem
|
||||
* @sg: the sg structure
|
||||
*
|
||||
* Returns: errno
|
||||
*/
|
||||
|
||||
static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
|
||||
{
|
||||
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
|
||||
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
|
||||
|
||||
spin_lock(&sdp->sd_statfs_spin);
|
||||
|
||||
*sc = *m_sc;
|
||||
sc->sc_total += l_sc->sc_total;
|
||||
sc->sc_free += l_sc->sc_free;
|
||||
sc->sc_dinodes += l_sc->sc_dinodes;
|
||||
|
||||
spin_unlock(&sdp->sd_statfs_spin);
|
||||
|
||||
if (sc->sc_free < 0)
|
||||
sc->sc_free = 0;
|
||||
if (sc->sc_free > sc->sc_total)
|
||||
sc->sc_free = sc->sc_total;
|
||||
if (sc->sc_dinodes < 0)
|
||||
sc->sc_dinodes = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_statfs - Gather and return stats about the filesystem
|
||||
* @sb: The superblock
|
||||
|
@ -370,7 +497,6 @@ static void gfs2_clear_inode(struct inode *inode)
|
|||
*/
|
||||
if (test_bit(GIF_USER, &ip->i_flags)) {
|
||||
ip->i_gl->gl_object = NULL;
|
||||
gfs2_glock_schedule_for_reclaim(ip->i_gl);
|
||||
gfs2_glock_put(ip->i_gl);
|
||||
ip->i_gl = NULL;
|
||||
if (ip->i_iopen_gh.gh_gl) {
|
||||
|
@ -423,8 +549,6 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
|
|||
seq_printf(s, ",debug");
|
||||
if (args->ar_upgrade)
|
||||
seq_printf(s, ",upgrade");
|
||||
if (args->ar_num_glockd != GFS2_GLOCKD_DEFAULT)
|
||||
seq_printf(s, ",num_glockd=%u", args->ar_num_glockd);
|
||||
if (args->ar_posix_acl)
|
||||
seq_printf(s, ",acl");
|
||||
if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
|
||||
|
@ -494,16 +618,16 @@ static void gfs2_delete_inode(struct inode *inode)
|
|||
gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
|
||||
error = gfs2_glock_nq(&ip->i_iopen_gh);
|
||||
if (error)
|
||||
goto out_uninit;
|
||||
goto out_truncate;
|
||||
|
||||
if (S_ISDIR(inode->i_mode) &&
|
||||
(ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
|
||||
(ip->i_diskflags & GFS2_DIF_EXHASH)) {
|
||||
error = gfs2_dir_exhash_dealloc(ip);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (ip->i_di.di_eattr) {
|
||||
if (ip->i_eattr) {
|
||||
error = gfs2_ea_dealloc(ip);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
@ -519,6 +643,7 @@ static void gfs2_delete_inode(struct inode *inode)
|
|||
if (error)
|
||||
goto out_unlock;
|
||||
|
||||
out_truncate:
|
||||
error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
|
||||
if (error)
|
||||
goto out_unlock;
|
||||
|
@ -527,8 +652,8 @@ static void gfs2_delete_inode(struct inode *inode)
|
|||
gfs2_trans_end(sdp);
|
||||
|
||||
out_unlock:
|
||||
gfs2_glock_dq(&ip->i_iopen_gh);
|
||||
out_uninit:
|
||||
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
|
||||
gfs2_glock_dq(&ip->i_iopen_gh);
|
||||
gfs2_holder_uninit(&ip->i_iopen_gh);
|
||||
gfs2_glock_dq_uninit(&gh);
|
||||
if (error && error != GLR_TRYFAILED)
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
* of the GNU General Public License version 2.
|
||||
*/
|
||||
|
||||
#ifndef __OPS_SUPER_DOT_H__
|
||||
#define __OPS_SUPER_DOT_H__
|
||||
|
||||
#include <linux/fs.h>
|
||||
|
||||
extern const struct super_operations gfs2_super_ops;
|
||||
|
||||
#endif /* __OPS_SUPER_DOT_H__ */
|
fs/gfs2/quota.c
@ -46,6 +46,8 @@
|
|||
#include <linux/bio.h>
|
||||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/lm_interface.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
|
@ -94,7 +96,7 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
|
|||
struct gfs2_quota_data *qd;
|
||||
int error;
|
||||
|
||||
qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS);
|
||||
qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
|
||||
if (!qd)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -119,7 +121,7 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
|
|||
return 0;
|
||||
|
||||
fail:
|
||||
kfree(qd);
|
||||
kmem_cache_free(gfs2_quotad_cachep, qd);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -158,7 +160,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
|
|||
if (qd || !create) {
|
||||
if (new_qd) {
|
||||
gfs2_lvb_unhold(new_qd->qd_gl);
|
||||
kfree(new_qd);
|
||||
kmem_cache_free(gfs2_quotad_cachep, new_qd);
|
||||
}
|
||||
*qdp = qd;
|
||||
return 0;
|
||||
|
@ -1013,7 +1015,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
|
|||
|
||||
if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
|
||||
return;
|
||||
if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
|
||||
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
|
||||
return;
|
||||
|
||||
for (x = 0; x < al->al_qd_num; x++) {
|
||||
|
@ -1100,15 +1102,15 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *
|
|||
int gfs2_quota_init(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
|
||||
unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
|
||||
unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
|
||||
unsigned int x, slot = 0;
|
||||
unsigned int found = 0;
|
||||
u64 dblock;
|
||||
u32 extlen = 0;
|
||||
int error;
|
||||
|
||||
if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
|
||||
ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
|
||||
if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
|
||||
ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
|
||||
gfs2_consist_inode(ip);
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -1195,7 +1197,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
|
|||
return error;
|
||||
}
|
||||
|
||||
void gfs2_quota_scan(struct gfs2_sbd *sdp)
|
||||
static void gfs2_quota_scan(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_quota_data *qd, *safe;
|
||||
LIST_HEAD(dead);
|
||||
|
@ -1222,7 +1224,7 @@ void gfs2_quota_scan(struct gfs2_sbd *sdp)
|
|||
gfs2_assert_warn(sdp, !qd->qd_bh_count);
|
||||
|
||||
gfs2_lvb_unhold(qd->qd_gl);
|
||||
kfree(qd);
|
||||
kmem_cache_free(gfs2_quotad_cachep, qd);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1257,7 +1259,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
|
|||
gfs2_assert_warn(sdp, !qd->qd_bh_count);
|
||||
|
||||
gfs2_lvb_unhold(qd->qd_gl);
|
||||
kfree(qd);
|
||||
kmem_cache_free(gfs2_quotad_cachep, qd);
|
||||
|
||||
spin_lock(&sdp->sd_quota_spin);
|
||||
}
|
||||
|
@@ -1272,3 +1274,94 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct gfs2_sbd *sdp),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while(1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
				   &statfs_timeo, &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* FIXME: This should be turned into a shrinker */
		gfs2_quota_scan(sdp);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

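quotad_check_timeo() above keeps one countdown per periodic job: the time actually slept is subtracted from each countdown, a job whose countdown has run out is executed and re-armed from its tunable quantum, and gfs2_quotad() then sleeps for the shortest remaining countdown. A stand-alone sketch of the same scheme, with hypothetical names and plain C in place of kernel code:

	#include <stdio.h>

	struct job {
		const char *name;
		unsigned long timeo;	/* remaining time until the job is due */
		unsigned long quantum;	/* re-arm interval once it has run */
	};

	/* Mirror of the quotad_check_timeo() logic: run and re-arm when the
	 * elapsed sleep covers the countdown, otherwise just count down. */
	static void check_timeo(struct job *j, unsigned long elapsed)
	{
		if (elapsed >= j->timeo) {
			printf("running %s\n", j->name);	/* stands in for fxn(sdp) */
			j->timeo = j->quantum;
		} else {
			j->timeo -= elapsed;
		}
	}

	int main(void)
	{
		struct job jobs[2] = { { "statfs sync", 0, 30 }, { "quota sync", 0, 60 } };
		unsigned long slept = 0;
		int i, round;

		for (round = 0; round < 4; round++) {
			for (i = 0; i < 2; i++)
				check_timeo(&jobs[i], slept);
			/* sleep for the shortest outstanding countdown, as gfs2_quotad() does */
			slept = jobs[0].timeo < jobs[1].timeo ? jobs[0].timeo : jobs[1].timeo;
		}
		return 0;
	}
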
@ -15,22 +15,22 @@ struct gfs2_sbd;
|
|||
|
||||
#define NO_QUOTA_CHANGE ((u32)-1)
|
||||
|
||||
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid);
|
||||
void gfs2_quota_unhold(struct gfs2_inode *ip);
|
||||
extern int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid);
|
||||
extern void gfs2_quota_unhold(struct gfs2_inode *ip);
|
||||
|
||||
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid);
|
||||
void gfs2_quota_unlock(struct gfs2_inode *ip);
|
||||
extern int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid);
|
||||
extern void gfs2_quota_unlock(struct gfs2_inode *ip);
|
||||
|
||||
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
|
||||
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
|
||||
u32 uid, u32 gid);
|
||||
extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
|
||||
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
|
||||
u32 uid, u32 gid);
|
||||
|
||||
int gfs2_quota_sync(struct gfs2_sbd *sdp);
|
||||
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
|
||||
extern int gfs2_quota_sync(struct gfs2_sbd *sdp);
|
||||
extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
|
||||
|
||||
int gfs2_quota_init(struct gfs2_sbd *sdp);
|
||||
void gfs2_quota_scan(struct gfs2_sbd *sdp);
|
||||
void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
|
||||
extern int gfs2_quota_init(struct gfs2_sbd *sdp);
|
||||
extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
|
||||
extern int gfs2_quotad(void *data);
|
||||
|
||||
static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
|
||||
{
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
#include <linux/gfs2_ondisk.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/lm_interface.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "gfs2.h"
|
||||
#include "incore.h"
|
||||
|
@ -583,13 +585,35 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
|
|||
return error;
|
||||
}
|
||||
|
||||
static struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_jdesc *jd;
|
||||
int found = 0;
|
||||
|
||||
spin_lock(&sdp->sd_jindex_spin);
|
||||
|
||||
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
|
||||
if (jd->jd_dirty) {
|
||||
jd->jd_dirty = 0;
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&sdp->sd_jindex_spin);
|
||||
|
||||
if (!found)
|
||||
jd = NULL;
|
||||
|
||||
return jd;
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_check_journals - Recover any dirty journals
|
||||
* @sdp: the filesystem
|
||||
*
|
||||
*/
|
||||
|
||||
void gfs2_check_journals(struct gfs2_sbd *sdp)
|
||||
static void gfs2_check_journals(struct gfs2_sbd *sdp)
|
||||
{
|
||||
struct gfs2_jdesc *jd;
|
||||
|
||||
|
@ -603,3 +627,25 @@ void gfs2_check_journals(struct gfs2_sbd *sdp)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* gfs2_recoverd - Recover dead machine's journals
|
||||
* @sdp: Pointer to GFS2 superblock
|
||||
*
|
||||
*/
|
||||
|
||||
int gfs2_recoverd(void *data)
|
||||
{
|
||||
struct gfs2_sbd *sdp = data;
|
||||
unsigned long t;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
gfs2_check_journals(sdp);
|
||||
t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
schedule_timeout_interruptible(t);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -18,17 +18,17 @@ static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk)
|
|||
*blk = 0;
|
||||
}
|
||||
|
||||
int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
|
||||
extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
|
||||
struct buffer_head **bh);
|
||||
|
||||
int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
|
||||
int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
|
||||
void gfs2_revoke_clean(struct gfs2_sbd *sdp);
|
||||
extern int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
|
||||
extern int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
|
||||
extern void gfs2_revoke_clean(struct gfs2_sbd *sdp);
|
||||
|
||||
int gfs2_find_jhead(struct gfs2_jdesc *jd,
|
||||
extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
|
||||
struct gfs2_log_header_host *head);
|
||||
int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
|
||||
void gfs2_check_journals(struct gfs2_sbd *sdp);
|
||||
extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
|
||||
extern int gfs2_recoverd(void *data);
|
||||
|
||||
#endif /* __RECOVERY_DOT_H__ */
|
||||
|
||||
|
|
|
fs/gfs2/rgrp.c
@@ -269,16 +269,14 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
 			   bi->bi_len, x);
 	}
 
-	if (count[0] != rgd->rd_rg.rg_free) {
+	if (count[0] != rgd->rd_free) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "free data mismatch: %u != %u\n",
-			       count[0], rgd->rd_rg.rg_free);
+			       count[0], rgd->rd_free);
 		return;
 	}
 
-	tmp = rgd->rd_data -
-		rgd->rd_rg.rg_free -
-		rgd->rd_rg.rg_dinodes;
+	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
 	if (count[1] + count[2] != tmp) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "used data mismatch: %u != %u\n",
@@ -286,10 +284,10 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
 		return;
 	}
 
-	if (count[3] != rgd->rd_rg.rg_dinodes) {
+	if (count[3] != rgd->rd_dinodes) {
 		if (gfs2_consist_rgrpd(rgd))
 			fs_err(sdp, "used metadata mismatch: %u != %u\n",
-			       count[3], rgd->rd_rg.rg_dinodes);
+			       count[3], rgd->rd_dinodes);
 		return;
 	}
 
@@ -501,7 +499,7 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
 	for (rgrps = 0;; rgrps++) {
 		loff_t pos = rgrps * sizeof(struct gfs2_rindex);
 
-		if (pos + sizeof(struct gfs2_rindex) >= ip->i_di.di_size)
+		if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize)
 			break;
 		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
 					   sizeof(struct gfs2_rindex));
@@ -590,7 +588,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct inode *inode = &ip->i_inode;
 	struct file_ra_state ra_state;
-	u64 rgrp_count = ip->i_di.di_size;
+	u64 rgrp_count = ip->i_disksize;
 	int error;
 
 	if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
@@ -634,7 +632,7 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
 	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
 		/* Ignore partials */
 		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
-		    ip->i_di.di_size)
+		    ip->i_disksize)
 			break;
 		error = read_rindex_entry(ip, &ra_state);
 		if (error) {
@@ -692,7 +690,6 @@ int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
 {
 	const struct gfs2_rgrp *str = buf;
-	struct gfs2_rgrp_host *rg = &rgd->rd_rg;
 	u32 rg_flags;
 
 	rg_flags = be32_to_cpu(str->rg_flags);
@@ -700,24 +697,23 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
 		rgd->rd_flags |= GFS2_RDF_NOALLOC;
 	else
 		rgd->rd_flags &= ~GFS2_RDF_NOALLOC;
-	rg->rg_free = be32_to_cpu(str->rg_free);
-	rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
-	rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
+	rgd->rd_free = be32_to_cpu(str->rg_free);
+	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
+	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
 }
 
 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
 {
 	struct gfs2_rgrp *str = buf;
-	struct gfs2_rgrp_host *rg = &rgd->rd_rg;
 	u32 rg_flags = 0;
 
 	if (rgd->rd_flags & GFS2_RDF_NOALLOC)
 		rg_flags |= GFS2_RGF_NOALLOC;
 	str->rg_flags = cpu_to_be32(rg_flags);
-	str->rg_free = cpu_to_be32(rg->rg_free);
-	str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
+	str->rg_free = cpu_to_be32(rgd->rd_free);
+	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
 	str->__pad = cpu_to_be32(0);
-	str->rg_igeneration = cpu_to_be64(rg->rg_igeneration);
+	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
 	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
 }
 
@@ -776,7 +772,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
 	}
 
 	spin_lock(&sdp->sd_rindex_spin);
-	rgd->rd_free_clone = rgd->rd_rg.rg_free;
+	rgd->rd_free_clone = rgd->rd_free;
 	rgd->rd_bh_count++;
 	spin_unlock(&sdp->sd_rindex_spin);
 
@@ -850,7 +846,7 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
 	}
 
 	spin_lock(&sdp->sd_rindex_spin);
-	rgd->rd_free_clone = rgd->rd_rg.rg_free;
+	rgd->rd_free_clone = rgd->rd_free;
 	spin_unlock(&sdp->sd_rindex_spin);
 }
 
@@ -1403,8 +1399,8 @@ u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
 	block = rgd->rd_data0 + blk;
 	ip->i_goal = block;
 
-	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free >= *n);
-	rgd->rd_rg.rg_free -= *n;
+	gfs2_assert_withdraw(sdp, rgd->rd_free >= *n);
+	rgd->rd_free -= *n;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
@@ -1445,10 +1441,10 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
 
 	block = rgd->rd_data0 + blk;
 
-	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
-	rgd->rd_rg.rg_free--;
-	rgd->rd_rg.rg_dinodes++;
-	*generation = rgd->rd_rg.rg_igeneration++;
+	gfs2_assert_withdraw(sdp, rgd->rd_free);
+	rgd->rd_free--;
+	rgd->rd_dinodes++;
+	*generation = rgd->rd_igeneration++;
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 
@@ -1481,7 +1477,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	if (!rgd)
 		return;
 
-	rgd->rd_rg.rg_free += blen;
+	rgd->rd_free += blen;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
@@ -1509,7 +1505,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	if (!rgd)
 		return;
 
-	rgd->rd_rg.rg_free += blen;
+	rgd->rd_free += blen;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
@@ -1546,10 +1542,10 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
 		return;
 	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
 
-	if (!rgd->rd_rg.rg_dinodes)
+	if (!rgd->rd_dinodes)
 		gfs2_consist_rgrpd(rgd);
-	rgd->rd_rg.rg_dinodes--;
-	rgd->rd_rg.rg_free++;
+	rgd->rd_dinodes--;
+	rgd->rd_free++;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

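The gfs2_rgrp_in()/gfs2_rgrp_out() changes above drop the intermediate gfs2_rgrp_host struct and byte-swap the big-endian on-disk fields directly into the in-core resource-group descriptor. Below is a small sketch of that on-disk/in-core conversion pattern; the demo_* types are illustrative stand-ins, not the real GFS2 structures.

/*
 * Sketch of the endian conversion pattern used by gfs2_rgrp_in/out:
 * big-endian buffer fields are swapped straight into native-endian
 * in-core fields, with no intermediate "*_host" structure.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_ondisk {			/* layout as stored in the buffer */
	__be32 free;
	__be32 dinodes;
	__be64 igeneration;
};

struct demo_incore {			/* layout the rest of the code uses */
	u32 free;
	u32 dinodes;
	u64 igeneration;
};

static void demo_in(struct demo_incore *ic, const void *buf)
{
	const struct demo_ondisk *str = buf;

	ic->free = be32_to_cpu(str->free);
	ic->dinodes = be32_to_cpu(str->dinodes);
	ic->igeneration = be64_to_cpu(str->igeneration);
}

static void demo_out(const struct demo_incore *ic, void *buf)
{
	struct demo_ondisk *str = buf;

	str->free = cpu_to_be32(ic->free);
	str->dinodes = cpu_to_be32(ic->dinodes);
	str->igeneration = cpu_to_be64(ic->igeneration);
}
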
246 fs/gfs2/super.c
@@ -33,76 +33,6 @@
 #include "trans.h"
 #include "util.h"
 
-/**
- * gfs2_jindex_hold - Grab a lock on the jindex
- * @sdp: The GFS2 superblock
- * @ji_gh: the holder for the jindex glock
- *
- * This is very similar to the gfs2_rindex_hold() function, except that
- * in general we hold the jindex lock for longer periods of time and
- * we grab it far less frequently (in general) then the rgrp lock.
- *
- * Returns: errno
- */
-
-int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
-{
-	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
-	struct qstr name;
-	char buf[20];
-	struct gfs2_jdesc *jd;
-	int error;
-
-	name.name = buf;
-
-	mutex_lock(&sdp->sd_jindex_mutex);
-
-	for (;;) {
-		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
-		if (error)
-			break;
-
-		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
-		name.hash = gfs2_disk_hash(name.name, name.len);
-
-		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
-		if (error == -ENOENT) {
-			error = 0;
-			break;
-		}
-
-		gfs2_glock_dq_uninit(ji_gh);
-
-		if (error)
-			break;
-
-		error = -ENOMEM;
-		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
-		if (!jd)
-			break;
-
-		INIT_LIST_HEAD(&jd->extent_list);
-		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
-		if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
-			if (!jd->jd_inode)
-				error = -ENOENT;
-			else
-				error = PTR_ERR(jd->jd_inode);
-			kfree(jd);
-			break;
-		}
-
-		spin_lock(&sdp->sd_jindex_spin);
-		jd->jd_jid = sdp->sd_journals++;
-		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
-		spin_unlock(&sdp->sd_jindex_spin);
-	}
-
-	mutex_unlock(&sdp->sd_jindex_mutex);
-
-	return error;
-}
-
 /**
  * gfs2_jindex_free - Clear all the journal index information
  * @sdp: The GFS2 superblock
@@ -166,39 +96,6 @@ struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
 	return jd;
 }
 
-void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
-{
-	struct gfs2_jdesc *jd;
-
-	spin_lock(&sdp->sd_jindex_spin);
-	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
-	if (jd)
-		jd->jd_dirty = 1;
-	spin_unlock(&sdp->sd_jindex_spin);
-}
-
-struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
-{
-	struct gfs2_jdesc *jd;
-	int found = 0;
-
-	spin_lock(&sdp->sd_jindex_spin);
-
-	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
-		if (jd->jd_dirty) {
-			jd->jd_dirty = 0;
-			found = 1;
-			break;
-		}
-	}
-	spin_unlock(&sdp->sd_jindex_spin);
-
-	if (!found)
-		jd = NULL;
-
-	return jd;
-}
-
 int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 {
 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
@@ -206,14 +103,14 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 	int ar;
 	int error;
 
-	if (ip->i_di.di_size < (8 << 20) || ip->i_di.di_size > (1 << 30) ||
-	    (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) {
+	if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
+	    (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
 		gfs2_consist_inode(ip);
 		return -EIO;
 	}
-	jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
+	jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
 
-	error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar);
+	error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
 	if (!error && ar) {
 		gfs2_consist_inode(ip);
 		error = -EIO;
@@ -423,137 +320,6 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 	return error;
 }
 
-/**
- * gfs2_statfs_i - Do a statfs
- * @sdp: the filesystem
- * @sg: the sg structure
- *
- * Returns: errno
- */
-
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
-{
-	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
-	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
-
-	spin_lock(&sdp->sd_statfs_spin);
-
-	*sc = *m_sc;
-	sc->sc_total += l_sc->sc_total;
-	sc->sc_free += l_sc->sc_free;
-	sc->sc_dinodes += l_sc->sc_dinodes;
-
-	spin_unlock(&sdp->sd_statfs_spin);
-
-	if (sc->sc_free < 0)
-		sc->sc_free = 0;
-	if (sc->sc_free > sc->sc_total)
-		sc->sc_free = sc->sc_total;
-	if (sc->sc_dinodes < 0)
-		sc->sc_dinodes = 0;
-
-	return 0;
-}
-
-/**
- * statfs_fill - fill in the sg for a given RG
- * @rgd: the RG
- * @sc: the sc structure
- *
- * Returns: 0 on success, -ESTALE if the LVB is invalid
- */
-
-static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
-			    struct gfs2_statfs_change_host *sc)
-{
-	gfs2_rgrp_verify(rgd);
-	sc->sc_total += rgd->rd_data;
-	sc->sc_free += rgd->rd_rg.rg_free;
-	sc->sc_dinodes += rgd->rd_rg.rg_dinodes;
-	return 0;
-}
-
-/**
- * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
- * @sdp: the filesystem
- * @sc: the sc info that will be returned
- *
- * Any error (other than a signal) will cause this routine to fall back
- * to the synchronous version.
- *
- * FIXME: This really shouldn't busy wait like this.
- *
- * Returns: errno
- */
-
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
-{
-	struct gfs2_holder ri_gh;
-	struct gfs2_rgrpd *rgd_next;
-	struct gfs2_holder *gha, *gh;
-	unsigned int slots = 64;
-	unsigned int x;
-	int done;
-	int error = 0, err;
-
-	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
-	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
-	if (!gha)
-		return -ENOMEM;
-
-	error = gfs2_rindex_hold(sdp, &ri_gh);
-	if (error)
-		goto out;
-
-	rgd_next = gfs2_rgrpd_get_first(sdp);
-
-	for (;;) {
-		done = 1;
-
-		for (x = 0; x < slots; x++) {
-			gh = gha + x;
-
-			if (gh->gh_gl && gfs2_glock_poll(gh)) {
-				err = gfs2_glock_wait(gh);
-				if (err) {
-					gfs2_holder_uninit(gh);
-					error = err;
-				} else {
-					if (!error)
-						error = statfs_slow_fill(
-							gh->gh_gl->gl_object, sc);
-					gfs2_glock_dq_uninit(gh);
-				}
-			}
-
-			if (gh->gh_gl)
-				done = 0;
-			else if (rgd_next && !error) {
-				error = gfs2_glock_nq_init(rgd_next->rd_gl,
-							   LM_ST_SHARED,
-							   GL_ASYNC,
-							   gh);
-				rgd_next = gfs2_rgrpd_get_next(rgd_next);
-				done = 0;
-			}
-
-			if (signal_pending(current))
-				error = -ERESTARTSYS;
-		}
-
-		if (done)
-			break;
-
-		yield();
-	}
-
-	gfs2_glock_dq_uninit(&ri_gh);
-
-out:
-	kfree(gha);
-	return error;
-}
-
 struct lfcc {
 	struct list_head list;
 	struct gfs2_holder gh;
@@ -580,10 +346,6 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
 	struct gfs2_log_header_host lh;
 	int error;
 
-	error = gfs2_jindex_hold(sdp, &ji_gh);
-	if (error)
-		return error;
-
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
 		if (!lfcc) {

fs/gfs2/super.h
@@ -10,6 +10,8 @@
 #ifndef __SUPER_DOT_H__
 #define __SUPER_DOT_H__
 
+#include <linux/fs.h>
+#include <linux/dcache.h>
 #include "incore.h"
 
 void gfs2_lm_unmount(struct gfs2_sbd *sdp);
@@ -23,12 +25,9 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
 	return x;
 }
 
-int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh);
 void gfs2_jindex_free(struct gfs2_sbd *sdp);
 
 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
-void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid);
-struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp);
 int gfs2_jdesc_check(struct gfs2_jdesc *jd);
 
 int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
@@ -40,11 +39,15 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp);
 void gfs2_statfs_change(struct gfs2_sbd *sdp,
 			s64 total, s64 free, s64 dinodes);
 int gfs2_statfs_sync(struct gfs2_sbd *sdp);
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
 
 int gfs2_freeze_fs(struct gfs2_sbd *sdp);
 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
 
+extern struct file_system_type gfs2_fs_type;
+extern struct file_system_type gfs2meta_fs_type;
+extern const struct export_operations gfs2_export_ops;
+extern const struct super_operations gfs2_super_ops;
+extern struct dentry_operations gfs2_dops;
+
 #endif /* __SUPER_DOT_H__ */

fs/gfs2/sys.c
@@ -26,9 +26,6 @@
 #include "quota.h"
 #include "util.h"
 
-char *gfs2_sys_margs;
-spinlock_t gfs2_sys_margs_lock;
-
 static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
@@ -263,7 +260,6 @@ ARGS_ATTR(localcaching, "%d\n");
 ARGS_ATTR(localflocks, "%d\n");
 ARGS_ATTR(debug, "%d\n");
 ARGS_ATTR(upgrade, "%d\n");
-ARGS_ATTR(num_glockd, "%u\n");
 ARGS_ATTR(posix_acl, "%d\n");
 ARGS_ATTR(quota, "%u\n");
 ARGS_ATTR(suiddir, "%d\n");
@@ -279,7 +275,6 @@ static struct attribute *args_attrs[] = {
 	&args_attr_localflocks.attr,
 	&args_attr_debug.attr,
 	&args_attr_upgrade.attr,
-	&args_attr_num_glockd.attr,
 	&args_attr_posix_acl.attr,
 	&args_attr_quota.attr,
 	&args_attr_suiddir.attr,
@@ -287,30 +282,6 @@ static struct attribute *args_attrs[] = {
 	NULL,
 };
 
-/*
- * display counters from superblock
- */
-
-struct counters_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct gfs2_sbd *, char *);
-};
-
-#define COUNTERS_ATTR(name, fmt) \
-static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
-{ \
-	return snprintf(buf, PAGE_SIZE, fmt, \
-			(unsigned int)atomic_read(&sdp->sd_##name)); \
-} \
-static struct counters_attr counters_attr_##name = __ATTR_RO(name)
-
-COUNTERS_ATTR(reclaimed, "%u\n");
-
-static struct attribute *counters_attrs[] = {
-	&counters_attr_reclaimed.attr,
-	NULL,
-};
-
 /*
  * get and set struct gfs2_tune fields
  */
@@ -393,7 +364,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
 } \
 TUNE_ATTR_2(name, name##_store)
 
-TUNE_ATTR(demote_secs, 0);
 TUNE_ATTR(incore_log_blocks, 0);
 TUNE_ATTR(log_flush_secs, 0);
 TUNE_ATTR(quota_warn_period, 0);
@@ -408,11 +378,9 @@ TUNE_ATTR(stall_secs, 1);
 TUNE_ATTR(statfs_quantum, 1);
 TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
 TUNE_ATTR_DAEMON(logd_secs, logd_process);
-TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
 static struct attribute *tune_attrs[] = {
-	&tune_attr_demote_secs.attr,
 	&tune_attr_incore_log_blocks.attr,
 	&tune_attr_log_flush_secs.attr,
 	&tune_attr_quota_warn_period.attr,
@@ -426,7 +394,6 @@ static struct attribute *tune_attrs[] = {
 	&tune_attr_statfs_quantum.attr,
 	&tune_attr_recoverd_secs.attr,
 	&tune_attr_logd_secs.attr,
-	&tune_attr_quotad_secs.attr,
 	&tune_attr_quota_scale.attr,
 	&tune_attr_new_files_jdata.attr,
 	NULL,
@@ -437,11 +404,6 @@ static struct attribute_group lockstruct_group = {
 	.attrs = lockstruct_attrs,
 };
 
-static struct attribute_group counters_group = {
-	.name = "counters",
-	.attrs = counters_attrs,
-};
-
 static struct attribute_group args_group = {
 	.name = "args",
 	.attrs = args_attrs,
@@ -466,13 +428,9 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
 	if (error)
 		goto fail_reg;
 
-	error = sysfs_create_group(&sdp->sd_kobj, &counters_group);
-	if (error)
-		goto fail_lockstruct;
-
 	error = sysfs_create_group(&sdp->sd_kobj, &args_group);
 	if (error)
-		goto fail_counters;
+		goto fail_lockstruct;
 
 	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
 	if (error)
@@ -483,8 +441,6 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
 
 fail_args:
 	sysfs_remove_group(&sdp->sd_kobj, &args_group);
-fail_counters:
-	sysfs_remove_group(&sdp->sd_kobj, &counters_group);
 fail_lockstruct:
 	sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
 fail_reg:
@@ -498,16 +454,27 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
 {
 	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
 	sysfs_remove_group(&sdp->sd_kobj, &args_group);
-	sysfs_remove_group(&sdp->sd_kobj, &counters_group);
 	sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
 	kobject_put(&sdp->sd_kobj);
 }
 
+static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
+		       struct kobj_uevent_env *env)
+{
+	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
+	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
+	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
+	return 0;
+}
+
+static struct kset_uevent_ops gfs2_uevent_ops = {
+	.uevent = gfs2_uevent,
+};
+
 int gfs2_sys_init(void)
 {
-	gfs2_sys_margs = NULL;
-	spin_lock_init(&gfs2_sys_margs_lock);
-	gfs2_kset = kset_create_and_add("gfs2", NULL, fs_kobj);
+	gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
 	if (!gfs2_kset)
 		return -ENOMEM;
 	return 0;
@@ -515,7 +482,6 @@ int gfs2_sys_init(void)
 
 void gfs2_sys_uninit(void)
 {
-	kfree(gfs2_sys_margs);
 	kset_unregister(gfs2_kset);
 }
 
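The gfs2_uevent() hook added above attaches LOCKTABLE and LOCKPROTO variables to every uevent emitted from the gfs2 kset. The sketch below shows the same kset plus uevent-callback wiring with placeholder demo_* names; the .uevent prototype matches the kernel generation in this diff (it has changed in later kernels), and fs_kobj is the /sys/fs parent also used by GFS2.

/*
 * Sketch: registering a kset under /sys/fs whose uevents carry extra
 * environment variables, mirroring the gfs2_uevent/gfs2_uevent_ops hook.
 */
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/fs.h>			/* for fs_kobj */

static struct kset *demo_kset;

static int demo_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	/* Anything added here appears in the hotplug/udev environment. */
	add_uevent_var(env, "DEMO_ROLE=%s", "example");
	return 0;
}

static struct kset_uevent_ops demo_uevent_ops = {
	.uevent = demo_uevent,
};

static int __init demo_init(void)
{
	demo_kset = kset_create_and_add("demo", &demo_uevent_ops, fs_kobj);
	return demo_kset ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	kset_unregister(demo_kset);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
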
fs/gfs2/sys.h
@@ -13,10 +13,6 @@
 #include <linux/spinlock.h>
 struct gfs2_sbd;
 
-/* Allow args to be passed to GFS2 when using an initial ram disk */
-extern char *gfs2_sys_margs;
-extern spinlock_t gfs2_sys_margs_lock;
-
 int gfs2_sys_fs_add(struct gfs2_sbd *sdp);
 void gfs2_sys_fs_del(struct gfs2_sbd *sdp);
 
fs/gfs2/util.c
@@ -25,6 +25,7 @@ struct kmem_cache *gfs2_glock_cachep __read_mostly;
 struct kmem_cache *gfs2_inode_cachep __read_mostly;
 struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
 struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
+struct kmem_cache *gfs2_quotad_cachep __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {

fs/gfs2/util.h
@@ -148,6 +148,7 @@ extern struct kmem_cache *gfs2_glock_cachep;
 extern struct kmem_cache *gfs2_inode_cachep;
 extern struct kmem_cache *gfs2_bufdata_cachep;
 extern struct kmem_cache *gfs2_rgrpd_cachep;
+extern struct kmem_cache *gfs2_quotad_cachep;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 					   unsigned int *p)

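gfs2_quotad_cachep, declared above, is an ordinary kmem_cache. For reference, a generic sketch of the create/alloc/free/destroy lifecycle such a cache goes through; the demo_* names and element type are placeholders, while in GFS2 the cache is created at module init and sized for its quota bookkeeping structure.

/* Sketch of a kmem_cache lifecycle; all demo_* names are illustrative. */
#include <linux/slab.h>

struct demo_item {
	int value;
};

static struct kmem_cache *demo_cachep;

static int demo_setup(void)
{
	demo_cachep = kmem_cache_create("demo_item",
					sizeof(struct demo_item), 0, 0, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}

static void demo_use(void)
{
	struct demo_item *it = kmem_cache_zalloc(demo_cachep, GFP_KERNEL);

	if (it) {
		it->value = 42;		/* use the object, then return it */
		kmem_cache_free(demo_cachep, it);
	}
}

static void demo_teardown(void)
{
	kmem_cache_destroy(demo_cachep);
}
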
44 fs/ioctl.c
@@ -231,7 +231,8 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
 #define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits)
 #define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits);
 
-/*
+/**
+ * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
  * @inode - the inode to map
  * @arg - the pointer to userspace where we copy everything to
  * @get_block - the fs's get_block function
@@ -242,11 +243,15 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
  *
  * If it is possible to have data blocks beyond a hole past @inode->i_size, then
  * please do not use this function, it will stop at the first unmapped block
- * beyond i_size
+ * beyond i_size.
+ *
+ * If you use this function directly, you need to do your own locking. Use
+ * generic_block_fiemap if you want the locking done for you.
  */
-int generic_block_fiemap(struct inode *inode,
-			 struct fiemap_extent_info *fieinfo, u64 start,
-			 u64 len, get_block_t *get_block)
+
+int __generic_block_fiemap(struct inode *inode,
+			   struct fiemap_extent_info *fieinfo, u64 start,
+			   u64 len, get_block_t *get_block)
 {
 	struct buffer_head tmp;
 	unsigned int start_blk;
@@ -260,9 +265,6 @@ int generic_block_fiemap(struct inode *inode,
 
 	start_blk = logical_to_blk(inode, start);
 
-	/* guard against change */
-	mutex_lock(&inode->i_mutex);
-
 	length = (long long)min_t(u64, len, i_size_read(inode));
 	map_len = length;
 
@@ -334,14 +336,36 @@ int generic_block_fiemap(struct inode *inode,
 		cond_resched();
 	} while (1);
 
-	mutex_unlock(&inode->i_mutex);
-
 	/* if ret is 1 then we just hit the end of the extent array */
 	if (ret == 1)
 		ret = 0;
 
 	return ret;
 }
+EXPORT_SYMBOL(__generic_block_fiemap);
+
+/**
+ * generic_block_fiemap - FIEMAP for block based inodes
+ * @inode: The inode to map
+ * @fieinfo: The mapping information
+ * @start: The initial block to map
+ * @len: The length of the extect to attempt to map
+ * @get_block: The block mapping function for the fs
+ *
+ * Calls __generic_block_fiemap to map the inode, after taking
+ * the inode's mutex lock.
+ */
+
+int generic_block_fiemap(struct inode *inode,
+			 struct fiemap_extent_info *fieinfo, u64 start,
+			 u64 len, get_block_t *get_block)
+{
+	int ret;
+	mutex_lock(&inode->i_mutex);
+	ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
 EXPORT_SYMBOL(generic_block_fiemap);
 
 #endif /* CONFIG_BLOCK */

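With generic_block_fiemap() now a locking wrapper around __generic_block_fiemap(), a block-based filesystem can plug either variant into its ->fiemap method. A hypothetical sketch follows: "myfs" and its stubbed get_block routine are placeholders, not from this patch; a caller that manages its own locking, as GFS2 does around its cluster lock, would call the double-underscore variant instead.

/*
 * Sketch: wiring the new helper into a filesystem's ->fiemap method.
 * The myfs_* names are placeholders for illustration only.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int myfs_get_block(struct inode *inode, sector_t lblock,
			  struct buffer_head *bh_result, int create)
{
	/* A real filesystem would map_bh() the on-disk block here. */
	return -EIO;
}

/* Matches the ->fiemap member of struct inode_operations. */
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	/*
	 * generic_block_fiemap() takes i_mutex around the block walk;
	 * use __generic_block_fiemap() when the caller handles locking.
	 */
	return generic_block_fiemap(inode, fieinfo, start, len,
				    myfs_get_block);
}
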
include/linux/fs.h
@@ -2059,6 +2059,9 @@ extern int vfs_fstat(unsigned int, struct kstat *);
 
 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 			unsigned long arg);
+extern int __generic_block_fiemap(struct inode *inode,
+				  struct fiemap_extent_info *fieinfo, u64 start,
+				  u64 len, get_block_t *get_block);
 extern int generic_block_fiemap(struct inode *inode,
 				struct fiemap_extent_info *fieinfo, u64 start,
 				u64 len, get_block_t *get_block);