mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6: (69 commits)
  fix handling of offsets in cris eeprom.c, get rid of fake on-stack files
  get rid of home-grown mutex in cris eeprom.c
  switch ecryptfs_write() to struct inode *, kill on-stack fake files
  switch ecryptfs_get_locked_page() to struct inode *
  simplify access to ecryptfs inodes in ->readpage() and friends
  AFS: Don't put struct file on the stack
  Ban ecryptfs over ecryptfs
  logfs: replace inode uid,gid,mode initialization with helper function
  ufs: replace inode uid,gid,mode initialization with helper function
  udf: replace inode uid,gid,mode init with helper
  ubifs: replace inode uid,gid,mode initialization with helper function
  sysv: replace inode uid,gid,mode initialization with helper function
  reiserfs: replace inode uid,gid,mode initialization with helper function
  ramfs: replace inode uid,gid,mode initialization with helper function
  omfs: replace inode uid,gid,mode initialization with helper function
  bfs: replace inode uid,gid,mode initialization with helper function
  ocfs2: replace inode uid,gid,mode initialization with helper function
  nilfs2: replace inode uid,gid,mode initialization with helper function
  minix: replace inode uid,gid,mode init with helper
  ext4: replace inode uid,gid,mode init with helper
  ...

Trivial conflict in fs/fs-writeback.c (mark bitfields unsigned)
commit e8bebe2f71
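Most of the per-filesystem commits in this series ("replace inode uid,gid,mode initialization with helper function") swap the open-coded owner setup visible in the hunks below for the new inode_init_owner() helper. As a rough, simplified sketch of what that helper does (types and includes simplified, not the verbatim kernel source):

#include <linux/fs.h>
#include <linux/cred.h>

/* Simplified sketch of inode_init_owner(): set i_uid/i_gid/i_mode in one
 * place, honouring the parent directory's set-group-ID bit the way each
 * filesystem used to do by hand. */
void inode_init_owner(struct inode *inode, const struct inode *dir, int mode)
{
	inode->i_uid = current_fsuid();
	if (dir && (dir->i_mode & S_ISGID)) {
		inode->i_gid = dir->i_gid;
		/* directories created under a setgid directory inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}
	inode->i_mode = mode;
}

Callers such as ext2_new_inode() or btrfs_new_inode() then collapse to a single inode_init_owner(inode, dir, mode) call, as the diffs below show.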
@@ -73,8 +73,7 @@ struct eeprom_type
int adapt_state; /* 1 = To high , 0 = Even, -1 = To low */

/* this one is to keep the read/write operations atomic */
-wait_queue_head_t wait_q;
-volatile int busy;
+struct mutex lock;
int retry_cnt_addr; /* Used to keep track of number of retries for
adaptive timing adjustments */
int retry_cnt_read;
@@ -115,8 +114,7 @@ const struct file_operations eeprom_fops =

int __init eeprom_init(void)
{
-init_waitqueue_head(&eeprom.wait_q);
-eeprom.busy = 0;
+mutex_init(&eeprom.lock);

#ifdef CONFIG_ETRAX_I2C_EEPROM_PROBE
#define EETEXT "Found"
@@ -439,10 +437,7 @@ static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig)

static int eeprom_read_buf(loff_t addr, char * buf, int count)
{
-struct file f;
-
-f.f_pos = addr;
-return eeprom_read(&f, buf, count, &addr);
+return eeprom_read(NULL, buf, count, &addr);
}

@@ -452,7 +447,7 @@ static int eeprom_read_buf(loff_t addr, char * buf, int count)
static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t *off)
{
int read=0;
-unsigned long p = file->f_pos;
+unsigned long p = *off;

unsigned char page;

@@ -461,12 +456,9 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t
return -EFAULT;
}

-wait_event_interruptible(eeprom.wait_q, !eeprom.busy);
-if (signal_pending(current))
+if (mutex_lock_interruptible(&eeprom.lock))
return -EINTR;

-eeprom.busy++;
-
page = (unsigned char) (p >> 8);

if(!eeprom_address(p))
@@ -476,8 +468,7 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t
i2c_stop();

/* don't forget to wake them up */
-eeprom.busy--;
-wake_up_interruptible(&eeprom.wait_q);
+mutex_unlock(&eeprom.lock);
return -EFAULT;
}

@@ -501,11 +492,10 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t

if(read > 0)
{
-file->f_pos += read;
+*off += read;
}

-eeprom.busy--;
-wake_up_interruptible(&eeprom.wait_q);
+mutex_unlock(&eeprom.lock);
return read;
}

@@ -513,11 +503,7 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t

static int eeprom_write_buf(loff_t addr, const char * buf, int count)
{
-struct file f;
-
-f.f_pos = addr;
-
-return eeprom_write(&f, buf, count, &addr);
+return eeprom_write(NULL, buf, count, &addr);
}

@@ -534,16 +520,14 @@ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count,
return -EFAULT;
}

-wait_event_interruptible(eeprom.wait_q, !eeprom.busy);
/* bail out if we get interrupted */
-if (signal_pending(current))
+if (mutex_lock_interruptible(&eeprom.lock))
return -EINTR;
-eeprom.busy++;
for(i = 0; (i < EEPROM_RETRIES) && (restart > 0); i++)
{
restart = 0;
written = 0;
-p = file->f_pos;
+p = *off;

while( (written < count) && (p < eeprom.size))
@@ -556,8 +540,7 @@ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count,
i2c_stop();

/* don't forget to wake them up */
-eeprom.busy--;
-wake_up_interruptible(&eeprom.wait_q);
+mutex_unlock(&eeprom.lock);
return -EFAULT;
}
#ifdef EEPROM_ADAPTIVE_TIMING
@@ -669,12 +652,11 @@ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count,
} /* while */
} /* for */

-eeprom.busy--;
-wake_up_interruptible(&eeprom.wait_q);
-if (written == 0 && file->f_pos >= eeprom.size){
+mutex_unlock(&eeprom.lock);
+if (written == 0 && p >= eeprom.size){
return -ENOSPC;
}
-file->f_pos += written;
+*off = p;
return written;
}
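Several of the hunks that follow are mechanical fallout of the vfs_fsync()/vfs_fsync_range() prototype change in this merge: the dentry argument is dropped, since the helper can derive what it needs from the struct file. A hedged sketch of how a call site changes (example_sync_file is a hypothetical wrapper for illustration only, not from the tree):

#include <linux/fs.h>

/* Illustrative only: the pattern repeated in loop.c, md/bitmap.c,
 * f_mass_storage.c, coda and ecryptfs below. */
static int example_sync_file(struct file *filp)
{
	/* before this merge: vfs_fsync(filp, filp->f_path.dentry, 1); */
	return vfs_fsync(filp, 1);	/* after: file + datasync only */
}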
@@ -485,7 +485,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}

-ret = vfs_fsync(file, file->f_path.dentry, 0);
+ret = vfs_fsync(file, 0);
if (unlikely(ret)) {
ret = -EIO;
goto out;
@@ -495,7 +495,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
ret = lo_send(lo, bio, pos);

if (barrier && !ret) {
-ret = vfs_fsync(file, file->f_path.dentry, 0);
+ret = vfs_fsync(file, 0);
if (unlikely(ret))
ret = -EIO;
}
@@ -1699,7 +1699,7 @@ int bitmap_create(mddev_t *mddev)
* and bypass the page cache, we must sync the file
* first.
*/
-vfs_fsync(file, file->f_dentry, 1);
+vfs_fsync(file, 1);
}
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
if (!mddev->bitmap_info.external)
@@ -654,7 +654,7 @@ static int fsg_lun_fsync_sub(struct fsg_lun *curlun)

if (curlun->ro || !filp)
return 0;
-return vfs_fsync(filp, filp->f_path.dentry, 1);
+return vfs_fsync(filp, 1);
}

static void store_cdrom_address(u8 *dest, int msf, u32 addr)
@@ -253,9 +253,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
return ERR_PTR(-ENOMEM);
}

-inode->i_mode = mode;
-inode->i_uid = current_fsuid();
-inode->i_gid = current_fsgid();
+inode_init_owner(inode, NULL, mode);
inode->i_blocks = 0;
inode->i_rdev = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \
attr.o bad_inode.o file.o filesystems.o namespace.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o drop_caches.o splice.o sync.o utimes.o \
-stack.o fs_struct.o
+stack.o fs_struct.o statfs.o

ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
@@ -189,13 +189,9 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
struct key *key)
{
struct page *page;
-struct file file = {
-.private_data = key,
-};

_enter("{%lu},%lu", dir->i_ino, index);

-page = read_mapping_page(dir->i_mapping, index, &file);
+page = read_cache_page(dir->i_mapping, index, afs_page_filler, key);
if (!IS_ERR(page)) {
kmap(page);
if (!PageChecked(page))
@@ -121,34 +121,19 @@ static void afs_file_readpage_read_complete(struct page *page,
#endif

/*
-* AFS read page from file, directory or symlink
+* read page from file, directory or symlink, given a key to use
*/
-static int afs_readpage(struct file *file, struct page *page)
+int afs_page_filler(void *data, struct page *page)
{
-struct afs_vnode *vnode;
-struct inode *inode;
-struct key *key;
+struct inode *inode = page->mapping->host;
+struct afs_vnode *vnode = AFS_FS_I(inode);
+struct key *key = data;
size_t len;
off_t offset;
int ret;

-inode = page->mapping->host;
-
-if (file) {
-key = file->private_data;
-ASSERT(key != NULL);
-} else {
-key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
-if (IS_ERR(key)) {
-ret = PTR_ERR(key);
-goto error_nokey;
-}
-}
-
_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

-vnode = AFS_FS_I(inode);
-
BUG_ON(!PageLocked(page));

ret = -ESTALE;
@@ -214,31 +199,56 @@ static int afs_readpage(struct file *file, struct page *page)
unlock_page(page);
}

-if (!file)
-key_put(key);
_leave(" = 0");
return 0;

error:
SetPageError(page);
unlock_page(page);
-if (!file)
-key_put(key);
-error_nokey:
_leave(" = %d", ret);
return ret;
}

+/*
+* read page from file, directory or symlink, given a file to nominate the key
+* to be used
+*/
+static int afs_readpage(struct file *file, struct page *page)
+{
+struct key *key;
+int ret;
+
+if (file) {
+key = file->private_data;
+ASSERT(key != NULL);
+ret = afs_page_filler(key, page);
+} else {
+struct inode *inode = page->mapping->host;
+key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
+if (IS_ERR(key)) {
+ret = PTR_ERR(key);
+} else {
+ret = afs_page_filler(key, page);
+key_put(key);
+}
+}
+return ret;
+}
+
/*
* read a set of pages
*/
static int afs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
+struct key *key = file->private_data;
struct afs_vnode *vnode;
int ret = 0;

-_enter(",{%lu},,%d", mapping->host->i_ino, nr_pages);
+_enter("{%d},{%lu},,%d",
+key_serial(key), mapping->host->i_ino, nr_pages);
+
+ASSERT(key != NULL);

vnode = AFS_FS_I(mapping->host);
if (vnode->flags & AFS_VNODE_DELETED) {
@@ -279,7 +289,7 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
}

/* load the missing pages from the network */
-ret = read_cache_pages(mapping, pages, (void *) afs_readpage, file);
+ret = read_cache_pages(mapping, pages, afs_page_filler, key);

_leave(" = %d [netting]", ret);
return ret;
@@ -494,6 +494,7 @@ extern const struct file_operations afs_file_operations;

extern int afs_open(struct inode *, struct file *);
extern int afs_release(struct inode *, struct file *);
+extern int afs_page_filler(void *, struct page *);

/*
* flock.c
@@ -49,9 +49,6 @@ static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
*/
int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
{
-struct file file = {
-.private_data = key,
-};
struct page *page;
size_t size;
char *buf;
@@ -61,7 +58,8 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);

/* read the contents of the symlink into the pagecache */
-page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file);
+page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
+afs_page_filler, key);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -205,7 +205,7 @@ static struct inode *anon_inode_mkinode(void)
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
-inode->i_mode = S_IRUSR | S_IWUSR;
+inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_flags |= S_PRIVATE;
@@ -105,14 +105,12 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, int mode,
}
set_bit(ino, info->si_imap);
info->si_freei--;
-inode->i_uid = current_fsuid();
-inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current_fsgid();
+inode_init_owner(inode, dir, mode);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
inode->i_mapping->a_ops = &bfs_aops;
-inode->i_mode = mode;
inode->i_ino = ino;
BFS_I(inode)->i_dsk_ino = ino;
BFS_I(inode)->i_sblock = 0;
@@ -245,37 +245,14 @@ struct super_block *freeze_bdev(struct block_device *bdev)
sb = get_active_super(bdev);
if (!sb)
goto out;
-if (sb->s_flags & MS_RDONLY) {
-sb->s_frozen = SB_FREEZE_TRANS;
-up_write(&sb->s_umount);
+error = freeze_super(sb);
+if (error) {
+deactivate_super(sb);
+bdev->bd_fsfreeze_count--;
mutex_unlock(&bdev->bd_fsfreeze_mutex);
-return sb;
+return ERR_PTR(error);
}
-
-sb->s_frozen = SB_FREEZE_WRITE;
-smp_wmb();
-
-sync_filesystem(sb);
-
-sb->s_frozen = SB_FREEZE_TRANS;
-smp_wmb();
-
-sync_blockdev(sb->s_bdev);
-
-if (sb->s_op->freeze_fs) {
-error = sb->s_op->freeze_fs(sb);
-if (error) {
-printk(KERN_ERR
-"VFS:Filesystem freeze failed\n");
-sb->s_frozen = SB_UNFROZEN;
-deactivate_locked_super(sb);
-bdev->bd_fsfreeze_count--;
-mutex_unlock(&bdev->bd_fsfreeze_mutex);
-return ERR_PTR(error);
-}
-}
-up_write(&sb->s_umount);
-
deactivate_super(sb);
out:
sync_blockdev(bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
@@ -296,40 +273,22 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)

mutex_lock(&bdev->bd_fsfreeze_mutex);
if (!bdev->bd_fsfreeze_count)
-goto out_unlock;
+goto out;

error = 0;
if (--bdev->bd_fsfreeze_count > 0)
-goto out_unlock;
+goto out;

if (!sb)
-goto out_unlock;
+goto out;

-BUG_ON(sb->s_bdev != bdev);
-down_write(&sb->s_umount);
-if (sb->s_flags & MS_RDONLY)
-goto out_unfrozen;
-
-if (sb->s_op->unfreeze_fs) {
-error = sb->s_op->unfreeze_fs(sb);
-if (error) {
-printk(KERN_ERR
-"VFS:Filesystem thaw failed\n");
-sb->s_frozen = SB_FREEZE_TRANS;
-bdev->bd_fsfreeze_count++;
-mutex_unlock(&bdev->bd_fsfreeze_mutex);
-return error;
-}
+error = thaw_super(sb);
+if (error) {
+bdev->bd_fsfreeze_count++;
+mutex_unlock(&bdev->bd_fsfreeze_mutex);
+return error;
}

-out_unfrozen:
-sb->s_frozen = SB_UNFROZEN;
-smp_wmb();
-wake_up(&sb->s_wait_unfrozen);
-
-if (sb)
-deactivate_locked_super(sb);
-out_unlock:
+out:
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return 0;
}
@@ -282,14 +282,14 @@ int btrfs_acl_chmod(struct inode *inode)
return ret;
}

-struct xattr_handler btrfs_xattr_acl_default_handler = {
+const struct xattr_handler btrfs_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
.flags = ACL_TYPE_DEFAULT,
.get = btrfs_xattr_acl_get,
.set = btrfs_xattr_acl_set,
};

-struct xattr_handler btrfs_xattr_acl_access_handler = {
+const struct xattr_handler btrfs_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
.flags = ACL_TYPE_ACCESS,
.get = btrfs_xattr_acl_get,
@@ -4121,16 +4121,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (ret != 0)
goto fail;

-inode->i_uid = current_fsuid();
-
-if (dir && (dir->i_mode & S_ISGID)) {
-inode->i_gid = dir->i_gid;
-if (S_ISDIR(mode))
-mode |= S_ISGID;
-} else
-inode->i_gid = current_fsgid();
-
-inode->i_mode = mode;
+inode_init_owner(inode, dir, mode);
inode->i_ino = objectid;
inode_set_bytes(inode, 0);
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -282,7 +282,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
* List of handlers for synthetic system.* attributes. All real ondisk
* attributes are handled directly.
*/
-struct xattr_handler *btrfs_xattr_handlers[] = {
+const struct xattr_handler *btrfs_xattr_handlers[] = {
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
&btrfs_xattr_acl_access_handler,
&btrfs_xattr_acl_default_handler,
@@ -21,9 +21,9 @@

#include <linux/xattr.h>

-extern struct xattr_handler btrfs_xattr_acl_access_handler;
-extern struct xattr_handler btrfs_xattr_acl_default_handler;
-extern struct xattr_handler *btrfs_xattr_handlers[];
+extern const struct xattr_handler btrfs_xattr_acl_access_handler;
+extern const struct xattr_handler btrfs_xattr_acl_default_handler;
+extern const struct xattr_handler *btrfs_xattr_handlers[];

extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
void *buffer, size_t size);
fs/buffer.c (27 changed lines)

@@ -561,26 +561,17 @@ static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
return err;
}

+static void do_thaw_one(struct super_block *sb, void *unused)
+{
+char b[BDEVNAME_SIZE];
+while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+printk(KERN_WARNING "Emergency Thaw on %s\n",
+bdevname(sb->s_bdev, b));
+}
+
static void do_thaw_all(struct work_struct *work)
{
-struct super_block *sb;
-char b[BDEVNAME_SIZE];
-
-spin_lock(&sb_lock);
-restart:
-list_for_each_entry(sb, &super_blocks, s_list) {
-sb->s_count++;
-spin_unlock(&sb_lock);
-down_read(&sb->s_umount);
-while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-printk(KERN_WARNING "Emergency Thaw on %s\n",
-bdevname(sb->s_bdev, b));
-up_read(&sb->s_umount);
-spin_lock(&sb_lock);
-if (__put_super_and_need_restart(sb))
-goto restart;
-}
-spin_unlock(&sb_lock);
+iterate_supers(do_thaw_one, NULL);
kfree(work);
printk(KERN_WARNING "Emergency Thaw complete\n");
}
@@ -844,8 +844,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
if ((ret >= 0 || ret == -EIOCBQUEUED) &&
((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
|| ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
-err = vfs_fsync_range(file, file->f_path.dentry,
-pos, pos + ret - 1, 1);
+err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
if (err < 0)
ret = err;
}
@@ -952,8 +952,7 @@ static int ceph_get_sb(struct file_system_type *fs_type,

out_splat:
ceph_mdsc_close_sessions(&client->mdsc);
-up_write(&sb->s_umount);
-deactivate_super(sb);
+deactivate_locked_super(sb);
goto out_final;

out:
@@ -217,7 +217,7 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
host_file = cfi->cfi_container;

-err = vfs_fsync(host_file, host_file->f_path.dentry, datasync);
+err = vfs_fsync(host_file, datasync);
if ( !err && !datasync ) {
lock_kernel();
err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
fs/dcache.c (20 changed lines)

@@ -536,7 +536,7 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
*/
static void prune_dcache(int count)
{
-struct super_block *sb;
+struct super_block *sb, *n;
int w_count;
int unused = dentry_stat.nr_unused;
int prune_ratio;
@@ -545,13 +545,14 @@ static void prune_dcache(int count)
if (unused == 0 || count == 0)
return;
spin_lock(&dcache_lock);
-restart:
if (count >= unused)
prune_ratio = 1;
else
prune_ratio = unused / count;
spin_lock(&sb_lock);
-list_for_each_entry(sb, &super_blocks, s_list) {
+list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+if (list_empty(&sb->s_instances))
+continue;
if (sb->s_nr_dentry_unused == 0)
continue;
sb->s_count++;
@@ -590,14 +591,10 @@ static void prune_dcache(int count)
}
spin_lock(&sb_lock);
count -= pruned;
-/*
-* restart only when sb is no longer on the list and
-* we have more work to do.
-*/
-if (__put_super_and_need_restart(sb) && count > 0) {
-spin_unlock(&sb_lock);
-goto restart;
-}
+__put_super(sb);
+/* more work left to do? */
+if (count <= 0)
+break;
}
spin_unlock(&sb_lock);
spin_unlock(&dcache_lock);
@@ -1529,6 +1526,7 @@ void d_delete(struct dentry * dentry)
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
if (atomic_read(&dentry->d_count) == 1) {
+dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_iput(dentry);
fsnotify_nameremove(dentry, isdir);
return;
@@ -384,19 +384,16 @@ static int devpts_get_sb(struct file_system_type *fs_type,
s->s_flags |= MS_ACTIVE;
}

-simple_set_mnt(mnt, s);

memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));

error = mknod_ptmx(s);
if (error)
-goto out_dput;
+goto out_undo_sget;

+simple_set_mnt(mnt, s);

return 0;

-out_dput:
-dput(s->s_root); /* undo dget() in simple_set_mnt() */

out_undo_sget:
deactivate_locked_super(s);
return error;
@@ -12,7 +12,7 @@
/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

-static void drop_pagecache_sb(struct super_block *sb)
+static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
struct inode *inode, *toput_inode = NULL;

@@ -33,26 +33,6 @@ static void drop_pagecache_sb(struct super_block *sb)
iput(toput_inode);
}

-static void drop_pagecache(void)
-{
-struct super_block *sb;
-
-spin_lock(&sb_lock);
-restart:
-list_for_each_entry(sb, &super_blocks, s_list) {
-sb->s_count++;
-spin_unlock(&sb_lock);
-down_read(&sb->s_umount);
-if (sb->s_root)
-drop_pagecache_sb(sb);
-up_read(&sb->s_umount);
-spin_lock(&sb_lock);
-if (__put_super_and_need_restart(sb))
-goto restart;
-}
-spin_unlock(&sb_lock);
-}
-
static void drop_slab(void)
{
int nr_objects;
@@ -68,7 +48,7 @@ int drop_caches_sysctl_handler(ctl_table *table, int write,
proc_dointvec_minmax(table, write, buffer, length, ppos);
if (write) {
if (sysctl_drop_caches & 1)
-drop_pagecache();
+iterate_supers(drop_pagecache_sb, NULL);
if (sysctl_drop_caches & 2)
drop_slab();
}
@@ -731,15 +731,14 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
struct page *page_for_lower,
size_t offset_in_page, size_t size);
-int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
-size_t size);
+int ecryptfs_write(struct inode *inode, char *data, loff_t offset, size_t size);
int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
struct inode *ecryptfs_inode);
int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode);
-struct page *ecryptfs_get_locked_page(struct file *file, loff_t index);
+struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index);
int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon);
int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
struct user_namespace *user_ns);
@@ -276,9 +276,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
static int
ecryptfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
-return vfs_fsync(ecryptfs_file_to_lower(file),
-ecryptfs_dentry_to_lower(dentry),
-datasync);
+return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}

static int ecryptfs_fasync(int fd, struct file *file, int flag)
@@ -142,19 +142,10 @@ ecryptfs_do_create(struct inode *directory_inode,
static int grow_file(struct dentry *ecryptfs_dentry)
{
struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
-struct file fake_file;
-struct ecryptfs_file_info tmp_file_info;
char zero_virt[] = { 0x00 };
int rc = 0;

-memset(&fake_file, 0, sizeof(fake_file));
-fake_file.f_path.dentry = ecryptfs_dentry;
-memset(&tmp_file_info, 0, sizeof(tmp_file_info));
-ecryptfs_set_file_private(&fake_file, &tmp_file_info);
-ecryptfs_set_file_lower(
-&fake_file,
-ecryptfs_inode_to_private(ecryptfs_inode)->lower_file);
-rc = ecryptfs_write(&fake_file, zero_virt, 0, 1);
+rc = ecryptfs_write(ecryptfs_inode, zero_virt, 0, 1);
i_size_write(ecryptfs_inode, 0);
rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat.flags |=
@@ -784,8 +775,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
{
int rc = 0;
struct inode *inode = dentry->d_inode;
-struct dentry *lower_dentry;
-struct file fake_ecryptfs_file;
struct ecryptfs_crypt_stat *crypt_stat;
loff_t i_size = i_size_read(inode);
loff_t lower_size_before_truncate;
@@ -796,23 +785,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
goto out;
}
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
-/* Set up a fake ecryptfs file, this is used to interface with
-* the file in the underlying filesystem so that the
-* truncation has an effect there as well. */
-memset(&fake_ecryptfs_file, 0, sizeof(fake_ecryptfs_file));
-fake_ecryptfs_file.f_path.dentry = dentry;
-/* Released at out_free: label */
-ecryptfs_set_file_private(&fake_ecryptfs_file,
-kmem_cache_alloc(ecryptfs_file_info_cache,
-GFP_KERNEL));
-if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
-rc = -ENOMEM;
-goto out;
-}
-lower_dentry = ecryptfs_dentry_to_lower(dentry);
-ecryptfs_set_file_lower(
-&fake_ecryptfs_file,
-ecryptfs_inode_to_private(dentry->d_inode)->lower_file);
/* Switch on growing or shrinking file */
if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
@@ -822,7 +794,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
* this triggers code that will fill in 0's throughout
* the intermediate portion of the previous end of the
* file and the new and of the file */
-rc = ecryptfs_write(&fake_ecryptfs_file, zero,
+rc = ecryptfs_write(inode, zero,
(ia->ia_size - 1), 1);
} else { /* ia->ia_size < i_size_read(inode) */
/* We're chopping off all the pages down to the page
@@ -835,10 +807,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = vmtruncate(inode, ia->ia_size);
if (rc)
-goto out_free;
+goto out;
lower_ia->ia_size = ia->ia_size;
lower_ia->ia_valid |= ATTR_SIZE;
-goto out_free;
+goto out;
}
if (num_zeros) {
char *zeros_virt;
@@ -846,16 +818,16 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
zeros_virt = kzalloc(num_zeros, GFP_KERNEL);
if (!zeros_virt) {
rc = -ENOMEM;
-goto out_free;
+goto out;
}
-rc = ecryptfs_write(&fake_ecryptfs_file, zeros_virt,
+rc = ecryptfs_write(inode, zeros_virt,
ia->ia_size, num_zeros);
kfree(zeros_virt);
if (rc) {
printk(KERN_ERR "Error attempting to zero out "
"the remainder of the end page on "
"reducing truncate; rc = [%d]\n", rc);
-goto out_free;
+goto out;
}
}
vmtruncate(inode, ia->ia_size);
@@ -864,7 +836,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
printk(KERN_ERR "Problem with "
"ecryptfs_write_inode_size_to_metadata; "
"rc = [%d]\n", rc);
-goto out_free;
+goto out;
}
/* We are reducing the size of the ecryptfs file, and need to
* know if we need to reduce the size of the lower file. */
@@ -878,10 +850,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
} else
lower_ia->ia_valid &= ~ATTR_SIZE;
}
-out_free:
-if (ecryptfs_file_to_private(&fake_ecryptfs_file))
-kmem_cache_free(ecryptfs_file_info_cache,
-ecryptfs_file_to_private(&fake_ecryptfs_file));
out:
return rc;
}
@@ -281,7 +281,7 @@ static void ecryptfs_init_mount_crypt_stat(
*
* Returns zero on success; non-zero on error
*/
-static int ecryptfs_parse_options(struct super_block *sb, char *options)
+static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options)
{
char *p;
int rc = 0;
@@ -293,7 +293,7 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
int fn_cipher_key_bytes;
int fn_cipher_key_bytes_set = 0;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
-&ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
+&sbi->mount_crypt_stat;
substring_t args[MAX_OPT_ARGS];
int token;
char *sig_src;
@@ -483,68 +483,7 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
}

struct kmem_cache *ecryptfs_sb_info_cache;

-/**
-* ecryptfs_fill_super
-* @sb: The ecryptfs super block
-* @raw_data: The options passed to mount
-* @silent: Not used but required by function prototype
-*
-* Sets up what we can of the sb, rest is done in ecryptfs_read_super
-*
-* Returns zero on success; non-zero otherwise
-*/
-static int
-ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
-{
-struct ecryptfs_sb_info *esi;
-int rc = 0;
-
-/* Released in ecryptfs_put_super() */
-ecryptfs_set_superblock_private(sb,
-kmem_cache_zalloc(ecryptfs_sb_info_cache,
-GFP_KERNEL));
-esi = ecryptfs_superblock_to_private(sb);
-if (!esi) {
-ecryptfs_printk(KERN_WARNING, "Out of memory\n");
-rc = -ENOMEM;
-goto out;
-}
-
-rc = bdi_setup_and_register(&esi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
-if (rc)
-goto out;
-
-sb->s_bdi = &esi->bdi;
-sb->s_op = &ecryptfs_sops;
-/* Released through deactivate_super(sb) from get_sb_nodev */
-sb->s_root = d_alloc(NULL, &(const struct qstr) {
-.hash = 0,.name = "/",.len = 1});
-if (!sb->s_root) {
-ecryptfs_printk(KERN_ERR, "d_alloc failed\n");
-rc = -ENOMEM;
-goto out;
-}
-sb->s_root->d_op = &ecryptfs_dops;
-sb->s_root->d_sb = sb;
-sb->s_root->d_parent = sb->s_root;
-/* Released in d_release when dput(sb->s_root) is called */
-/* through deactivate_super(sb) from get_sb_nodev() */
-ecryptfs_set_dentry_private(sb->s_root,
-kmem_cache_zalloc(ecryptfs_dentry_info_cache,
-GFP_KERNEL));
-if (!ecryptfs_dentry_to_private(sb->s_root)) {
-ecryptfs_printk(KERN_ERR,
-"dentry_info_cache alloc failed\n");
-rc = -ENOMEM;
-goto out;
-}
-rc = 0;
-out:
-/* Should be able to rely on deactivate_super called from
-* get_sb_nodev */
-return rc;
-}
+static struct file_system_type ecryptfs_fs_type;

/**
* ecryptfs_read_super
@@ -565,6 +504,13 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
goto out;
}
+if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
+rc = -EINVAL;
+printk(KERN_ERR "Mount on filesystem of type "
+"eCryptfs explicitly disallowed due to "
+"known incompatibilities\n");
+goto out_free;
+}
ecryptfs_set_superblock_lower(sb, path.dentry->d_sb);
sb->s_maxbytes = path.dentry->d_sb->s_maxbytes;
sb->s_blocksize = path.dentry->d_sb->s_blocksize;
@@ -588,11 +534,8 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
* @dev_name: The path to mount over
* @raw_data: The options passed into the kernel
*
-* The whole ecryptfs_get_sb process is broken into 4 functions:
+* The whole ecryptfs_get_sb process is broken into 3 functions:
* ecryptfs_parse_options(): handle options passed to ecryptfs, if any
-* ecryptfs_fill_super(): used by get_sb_nodev, fills out the super_block
-* with as much information as it can before needing
-* the lower filesystem.
* ecryptfs_read_super(): this accesses the lower filesystem and uses
* ecryptfs_interpose to perform most of the linking
* ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c)
@@ -601,30 +544,78 @@ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data,
struct vfsmount *mnt)
{
+struct super_block *s;
+struct ecryptfs_sb_info *sbi;
+struct ecryptfs_dentry_info *root_info;
+const char *err = "Getting sb failed";
int rc;
-struct super_block *sb;

-rc = get_sb_nodev(fs_type, flags, raw_data, ecryptfs_fill_super, mnt);
-if (rc < 0) {
-printk(KERN_ERR "Getting sb failed; rc = [%d]\n", rc);
+sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
+if (!sbi) {
+rc = -ENOMEM;
goto out;
}
-sb = mnt->mnt_sb;
-rc = ecryptfs_parse_options(sb, raw_data);
+
+rc = ecryptfs_parse_options(sbi, raw_data);
if (rc) {
-printk(KERN_ERR "Error parsing options; rc = [%d]\n", rc);
-goto out_abort;
+err = "Error parsing options";
+goto out;
}
-rc = ecryptfs_read_super(sb, dev_name);
+
+s = sget(fs_type, NULL, set_anon_super, NULL);
+if (IS_ERR(s)) {
+rc = PTR_ERR(s);
+goto out;
+}
+
+s->s_flags = flags;
+rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
if (rc) {
-printk(KERN_ERR "Reading sb failed; rc = [%d]\n", rc);
-goto out_abort;
+deactivate_locked_super(s);
+goto out;
}
-goto out;
-out_abort:
-dput(sb->s_root); /* aka mnt->mnt_root, as set by get_sb_nodev() */
-deactivate_locked_super(sb);
+
+ecryptfs_set_superblock_private(s, sbi);
+s->s_bdi = &sbi->bdi;
+
+/* ->kill_sb() will take care of sbi after that point */
+sbi = NULL;
+s->s_op = &ecryptfs_sops;
+
+rc = -ENOMEM;
+s->s_root = d_alloc(NULL, &(const struct qstr) {
+.hash = 0,.name = "/",.len = 1});
+if (!s->s_root) {
+deactivate_locked_super(s);
+goto out;
+}
+s->s_root->d_op = &ecryptfs_dops;
+s->s_root->d_sb = s;
+s->s_root->d_parent = s->s_root;
+
+root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
+if (!root_info) {
+deactivate_locked_super(s);
+goto out;
+}
+/* ->kill_sb() will take care of root_info */
+ecryptfs_set_dentry_private(s->s_root, root_info);
+s->s_flags |= MS_ACTIVE;
+rc = ecryptfs_read_super(s, dev_name);
+if (rc) {
+deactivate_locked_super(s);
+err = "Reading sb failed";
+goto out;
+}
+simple_set_mnt(mnt, s);
+return 0;
+
out:
+if (sbi) {
+ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
+kmem_cache_free(ecryptfs_sb_info_cache, sbi);
+}
+printk(KERN_ERR "%s; rc = [%d]\n", err, rc);
return rc;
}

@@ -633,11 +624,16 @@ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags,
* @sb: The ecryptfs super block
*
* Used to bring the superblock down and free the private data.
-* Private data is free'd in ecryptfs_put_super()
*/
static void ecryptfs_kill_block_super(struct super_block *sb)
{
-generic_shutdown_super(sb);
+struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb);
+kill_anon_super(sb);
+if (!sb_info)
+return;
+ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
+bdi_destroy(&sb_info->bdi);
+kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
}

static struct file_system_type ecryptfs_fs_type = {
@@ -44,17 +44,9 @@
* Returns locked and up-to-date page (if ok), with increased
* refcnt.
*/
-struct page *ecryptfs_get_locked_page(struct file *file, loff_t index)
+struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
{
-struct dentry *dentry;
-struct inode *inode;
-struct address_space *mapping;
-struct page *page;
-
-dentry = file->f_path.dentry;
-inode = dentry->d_inode;
-mapping = inode->i_mapping;
-page = read_mapping_page(mapping, index, (void *)file);
+struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
if (!IS_ERR(page))
lock_page(page);
return page;
@@ -198,7 +190,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
static int ecryptfs_readpage(struct file *file, struct page *page)
{
struct ecryptfs_crypt_stat *crypt_stat =
-&ecryptfs_inode_to_private(file->f_path.dentry->d_inode)->crypt_stat;
+&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
int rc = 0;

if (!crypt_stat
@@ -300,8 +292,7 @@ static int ecryptfs_write_begin(struct file *file,

if (!PageUptodate(page)) {
struct ecryptfs_crypt_stat *crypt_stat =
-&ecryptfs_inode_to_private(
-file->f_path.dentry->d_inode)->crypt_stat;
+&ecryptfs_inode_to_private(mapping->host)->crypt_stat;

if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
|| (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
@@ -487,7 +478,7 @@ static int ecryptfs_write_end(struct file *file,
unsigned to = from + copied;
struct inode *ecryptfs_inode = mapping->host;
struct ecryptfs_crypt_stat *crypt_stat =
-&ecryptfs_inode_to_private(file->f_path.dentry->d_inode)->crypt_stat;
+&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc;

if (crypt_stat->flags & ECRYPTFS_NEW_FILE) {
@@ -93,7 +93,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,

/**
* ecryptfs_write
-* @ecryptfs_file: The eCryptfs file into which to write
+* @ecryptfs_inode: The eCryptfs file into which to write
* @data: Virtual address where data to write is located
* @offset: Offset in the eCryptfs file at which to begin writing the
* data from @data
@@ -109,12 +109,11 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
+int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
size_t size)
{
struct page *ecryptfs_page;
struct ecryptfs_crypt_stat *crypt_stat;
-struct inode *ecryptfs_inode = ecryptfs_file->f_dentry->d_inode;
char *ecryptfs_page_virt;
loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
loff_t data_offset = 0;
@@ -145,7 +144,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
if (num_bytes > total_remaining_zeros)
num_bytes = total_remaining_zeros;
}
-ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_file,
+ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
ecryptfs_page_idx);
if (IS_ERR(ecryptfs_page)) {
rc = PTR_ERR(ecryptfs_page);
@@ -302,10 +301,10 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
int ecryptfs_read(char *data, loff_t offset, size_t size,
struct file *ecryptfs_file)
{
+struct inode *ecryptfs_inode = ecryptfs_file->f_dentry->d_inode;
struct page *ecryptfs_page;
char *ecryptfs_page_virt;
-loff_t ecryptfs_file_size =
-i_size_read(ecryptfs_file->f_dentry->d_inode);
+loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
loff_t data_offset = 0;
loff_t pos;
int rc = 0;
@@ -327,7 +326,7 @@ int ecryptfs_read(char *data, loff_t offset, size_t size,

if (num_bytes > total_remaining_bytes)
num_bytes = total_remaining_bytes;
-ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_file,
+ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
ecryptfs_page_idx);
if (IS_ERR(ecryptfs_page)) {
rc = PTR_ERR(ecryptfs_page);
@@ -108,27 +108,6 @@ void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode)
inode->i_mapping->a_ops = &ecryptfs_aops;
}

-/**
-* ecryptfs_put_super
-* @sb: Pointer to the ecryptfs super block
-*
-* Final actions when unmounting a file system.
-* This will handle deallocation and release of our private data.
-*/
-static void ecryptfs_put_super(struct super_block *sb)
-{
-struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb);
-
-lock_kernel();
-
-ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
-bdi_destroy(&sb_info->bdi);
-kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
-ecryptfs_set_superblock_private(sb, NULL);
-
-unlock_kernel();
-}
-
/**
* ecryptfs_statfs
* @sb: The ecryptfs super block
@@ -203,7 +182,6 @@ const struct super_operations ecryptfs_sops = {
.alloc_inode = ecryptfs_alloc_inode,
.destroy_inode = ecryptfs_destroy_inode,
.drop_inode = generic_delete_inode,
-.put_super = ecryptfs_put_super,
.statfs = ecryptfs_statfs,
.remount_fs = NULL,
.clear_inode = ecryptfs_clear_inode,
@@ -1123,16 +1123,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
sbi = sb->s_fs_info;

sb->s_dirt = 1;
-inode->i_uid = current->cred->fsuid;
-if (dir->i_mode & S_ISGID) {
-inode->i_gid = dir->i_gid;
-if (S_ISDIR(mode))
-mode |= S_ISGID;
-} else {
-inode->i_gid = current->cred->fsgid;
-}
-inode->i_mode = mode;
+inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
inode->i_blkbits = EXOFS_BLKSHIFT;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -420,7 +420,7 @@ ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
return error;
}

-struct xattr_handler ext2_xattr_acl_access_handler = {
+const struct xattr_handler ext2_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
.flags = ACL_TYPE_ACCESS,
.list = ext2_xattr_list_acl_access,
@@ -428,7 +428,7 @@ struct xattr_handler ext2_xattr_acl_access_handler = {
.set = ext2_xattr_set_acl,
};

-struct xattr_handler ext2_xattr_acl_default_handler = {
+const struct xattr_handler ext2_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
.flags = ACL_TYPE_DEFAULT,
.list = ext2_xattr_list_acl_default,
@@ -549,16 +549,12 @@ struct inode *ext2_new_inode(struct inode *dir, int mode)

sb->s_dirt = 1;
mark_buffer_dirty(bh2);
-inode->i_uid = current_fsuid();
-if (test_opt (sb, GRPID))
+if (test_opt(sb, GRPID)) {
+inode->i_mode = mode;
+inode->i_uid = current_fsuid();
inode->i_gid = dir->i_gid;
-else if (dir->i_mode & S_ISGID) {
-inode->i_gid = dir->i_gid;
-if (S_ISDIR(mode))
-mode |= S_ISGID;
} else
-inode->i_gid = current_fsgid();
-inode->i_mode = mode;
+inode_init_owner(inode, dir, mode);

inode->i_ino = ino;
inode->i_blocks = 0;
@@ -101,7 +101,7 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *,

static struct mb_cache *ext2_xattr_cache;

-static struct xattr_handler *ext2_xattr_handler_map[] = {
+static const struct xattr_handler *ext2_xattr_handler_map[] = {
[EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext2_xattr_acl_access_handler,
@@ -113,7 +113,7 @@ static struct xattr_handler *ext2_xattr_handler_map[] = {
#endif
};

-struct xattr_handler *ext2_xattr_handlers[] = {
+const struct xattr_handler *ext2_xattr_handlers[] = {
&ext2_xattr_user_handler,
&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -126,10 +126,10 @@ struct xattr_handler *ext2_xattr_handlers[] = {
NULL
};

-static inline struct xattr_handler *
+static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
-struct xattr_handler *handler = NULL;
+const struct xattr_handler *handler = NULL;

if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
handler = ext2_xattr_handler_map[name_index];
@@ -298,7 +298,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
/* list the attribute names */
for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
entry = EXT2_XATTR_NEXT(entry)) {
-struct xattr_handler *handler =
+const struct xattr_handler *handler =
ext2_xattr_handler(entry->e_name_index);

if (handler) {
@ -55,11 +55,11 @@ struct ext2_xattr_entry {
|
|||
|
||||
# ifdef CONFIG_EXT2_FS_XATTR
|
||||
|
||||
extern struct xattr_handler ext2_xattr_user_handler;
|
||||
extern struct xattr_handler ext2_xattr_trusted_handler;
|
||||
extern struct xattr_handler ext2_xattr_acl_access_handler;
|
||||
extern struct xattr_handler ext2_xattr_acl_default_handler;
|
||||
extern struct xattr_handler ext2_xattr_security_handler;
|
||||
extern const struct xattr_handler ext2_xattr_user_handler;
|
||||
extern const struct xattr_handler ext2_xattr_trusted_handler;
|
||||
extern const struct xattr_handler ext2_xattr_acl_access_handler;
|
||||
extern const struct xattr_handler ext2_xattr_acl_default_handler;
|
||||
extern const struct xattr_handler ext2_xattr_security_handler;
|
||||
|
||||
extern ssize_t ext2_listxattr(struct dentry *, char *, size_t);
|
||||
|
||||
|
@ -72,7 +72,7 @@ extern void ext2_xattr_put_super(struct super_block *);
|
|||
extern int init_ext2_xattr(void);
|
||||
extern void exit_ext2_xattr(void);
|
||||
|
||||
extern struct xattr_handler *ext2_xattr_handlers[];
|
||||
extern const struct xattr_handler *ext2_xattr_handlers[];
|
||||
|
||||
# else /* CONFIG_EXT2_FS_XATTR */
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ ext2_init_security(struct inode *inode, struct inode *dir)
|
|||
return err;
|
||||
}
|
||||
|
||||
struct xattr_handler ext2_xattr_security_handler = {
|
||||
const struct xattr_handler ext2_xattr_security_handler = {
|
||||
.prefix = XATTR_SECURITY_PREFIX,
|
||||
.list = ext2_xattr_security_list,
|
||||
.get = ext2_xattr_security_get,
|
||||
|
|
|
@ -50,7 +50,7 @@ ext2_xattr_trusted_set(struct dentry *dentry, const char *name,
|
|||
value, size, flags);
|
||||
}
|
||||
|
||||
struct xattr_handler ext2_xattr_trusted_handler = {
|
||||
const struct xattr_handler ext2_xattr_trusted_handler = {
|
||||
.prefix = XATTR_TRUSTED_PREFIX,
|
||||
.list = ext2_xattr_trusted_list,
|
||||
.get = ext2_xattr_trusted_get,
|
||||
|
|
|
@ -54,7 +54,7 @@ ext2_xattr_user_set(struct dentry *dentry, const char *name,
|
|||
name, value, size, flags);
|
||||
}
|
||||
|
||||
struct xattr_handler ext2_xattr_user_handler = {
|
||||
const struct xattr_handler ext2_xattr_user_handler = {
|
||||
.prefix = XATTR_USER_PREFIX,
|
||||
.list = ext2_xattr_user_list,
|
||||
.get = ext2_xattr_user_get,
|
||||
|
|
|
@ -456,7 +456,7 @@ ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
|
|||
return error;
|
||||
}
|
||||
|
||||
struct xattr_handler ext3_xattr_acl_access_handler = {
|
||||
const struct xattr_handler ext3_xattr_acl_access_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_ACCESS,
|
||||
.flags = ACL_TYPE_ACCESS,
|
||||
.list = ext3_xattr_list_acl_access,
|
||||
|
@ -464,7 +464,7 @@ struct xattr_handler ext3_xattr_acl_access_handler = {
|
|||
.set = ext3_xattr_set_acl,
|
||||
};
|
||||
|
||||
struct xattr_handler ext3_xattr_acl_default_handler = {
|
||||
const struct xattr_handler ext3_xattr_acl_default_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_DEFAULT,
|
||||
.flags = ACL_TYPE_DEFAULT,
|
||||
.list = ext3_xattr_list_acl_default,
|
||||
|
|
|
@ -538,16 +538,13 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
|
|||
if (S_ISDIR(mode))
|
||||
percpu_counter_inc(&sbi->s_dirs_counter);
|
||||
|
||||
inode->i_uid = current_fsuid();
|
||||
if (test_opt (sb, GRPID))
|
||||
|
||||
if (test_opt(sb, GRPID)) {
|
||||
inode->i_mode = mode;
|
||||
inode->i_uid = current_fsuid();
|
||||
inode->i_gid = dir->i_gid;
|
||||
else if (dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
mode |= S_ISGID;
|
||||
} else
|
||||
inode->i_gid = current_fsgid();
|
||||
inode->i_mode = mode;
|
||||
inode_init_owner(inode, dir, mode);
|
||||
|
||||
inode->i_ino = ino;
|
||||
/* This is the optimal IO size (for stat), not the fs block size */
|
||||
|
|
|
@ -104,7 +104,7 @@ static int ext3_xattr_list(struct dentry *dentry, char *buffer,
|
|||
|
||||
static struct mb_cache *ext3_xattr_cache;
|
||||
|
||||
static struct xattr_handler *ext3_xattr_handler_map[] = {
|
||||
static const struct xattr_handler *ext3_xattr_handler_map[] = {
|
||||
[EXT3_XATTR_INDEX_USER] = &ext3_xattr_user_handler,
|
||||
#ifdef CONFIG_EXT3_FS_POSIX_ACL
|
||||
[EXT3_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext3_xattr_acl_access_handler,
|
||||
|
@ -116,7 +116,7 @@ static struct xattr_handler *ext3_xattr_handler_map[] = {
|
|||
#endif
|
||||
};
|
||||
|
||||
struct xattr_handler *ext3_xattr_handlers[] = {
|
||||
const struct xattr_handler *ext3_xattr_handlers[] = {
|
||||
&ext3_xattr_user_handler,
|
||||
&ext3_xattr_trusted_handler,
|
||||
#ifdef CONFIG_EXT3_FS_POSIX_ACL
|
||||
|
@ -129,10 +129,10 @@ struct xattr_handler *ext3_xattr_handlers[] = {
|
|||
NULL
|
||||
};
|
||||
|
||||
static inline struct xattr_handler *
|
||||
static inline const struct xattr_handler *
|
||||
ext3_xattr_handler(int name_index)
|
||||
{
|
||||
struct xattr_handler *handler = NULL;
|
||||
const struct xattr_handler *handler = NULL;
|
||||
|
||||
if (name_index > 0 && name_index < ARRAY_SIZE(ext3_xattr_handler_map))
|
||||
handler = ext3_xattr_handler_map[name_index];
|
||||
|
@ -338,7 +338,7 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
|
|||
size_t rest = buffer_size;
|
||||
|
||||
for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
|
||||
struct xattr_handler *handler =
|
||||
const struct xattr_handler *handler =
|
||||
ext3_xattr_handler(entry->e_name_index);
|
||||
|
||||
if (handler) {
|
||||
|
|
|
@ -58,11 +58,11 @@ struct ext3_xattr_entry {
|
|||
|
||||
# ifdef CONFIG_EXT3_FS_XATTR
|
||||
|
||||
extern struct xattr_handler ext3_xattr_user_handler;
|
||||
extern struct xattr_handler ext3_xattr_trusted_handler;
|
||||
extern struct xattr_handler ext3_xattr_acl_access_handler;
|
||||
extern struct xattr_handler ext3_xattr_acl_default_handler;
|
||||
extern struct xattr_handler ext3_xattr_security_handler;
|
||||
extern const struct xattr_handler ext3_xattr_user_handler;
|
||||
extern const struct xattr_handler ext3_xattr_trusted_handler;
|
||||
extern const struct xattr_handler ext3_xattr_acl_access_handler;
|
||||
extern const struct xattr_handler ext3_xattr_acl_default_handler;
|
||||
extern const struct xattr_handler ext3_xattr_security_handler;
|
||||
|
||||
extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
|
||||
|
||||
|
@ -76,7 +76,7 @@ extern void ext3_xattr_put_super(struct super_block *);
|
|||
extern int init_ext3_xattr(void);
|
||||
extern void exit_ext3_xattr(void);
|
||||
|
||||
extern struct xattr_handler *ext3_xattr_handlers[];
|
||||
extern const struct xattr_handler *ext3_xattr_handlers[];
|
||||
|
||||
# else /* CONFIG_EXT3_FS_XATTR */
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir)
|
|||
return err;
|
||||
}
|
||||
|
||||
struct xattr_handler ext3_xattr_security_handler = {
|
||||
const struct xattr_handler ext3_xattr_security_handler = {
|
||||
.prefix = XATTR_SECURITY_PREFIX,
|
||||
.list = ext3_xattr_security_list,
|
||||
.get = ext3_xattr_security_get,
|
||||
|
|
|
@ -51,7 +51,7 @@ ext3_xattr_trusted_set(struct dentry *dentry, const char *name,
|
|||
value, size, flags);
|
||||
}
|
||||
|
||||
struct xattr_handler ext3_xattr_trusted_handler = {
|
||||
const struct xattr_handler ext3_xattr_trusted_handler = {
|
||||
.prefix = XATTR_TRUSTED_PREFIX,
|
||||
.list = ext3_xattr_trusted_list,
|
||||
.get = ext3_xattr_trusted_get,
|
||||
|
|
|
@ -54,7 +54,7 @@ ext3_xattr_user_set(struct dentry *dentry, const char *name,
|
|||
name, value, size, flags);
|
||||
}
|
||||
|
||||
struct xattr_handler ext3_xattr_user_handler = {
|
||||
const struct xattr_handler ext3_xattr_user_handler = {
|
||||
.prefix = XATTR_USER_PREFIX,
|
||||
.list = ext3_xattr_user_list,
|
||||
.get = ext3_xattr_user_get,
|
||||
|
|
|
@@ -454,7 +454,7 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
 	return error;
 }
 
-struct xattr_handler ext4_xattr_acl_access_handler = {
+const struct xattr_handler ext4_xattr_acl_access_handler = {
 	.prefix	= POSIX_ACL_XATTR_ACCESS,
 	.flags	= ACL_TYPE_ACCESS,
 	.list	= ext4_xattr_list_acl_access,
@@ -462,7 +462,7 @@ struct xattr_handler ext4_xattr_acl_access_handler = {
 	.set	= ext4_xattr_set_acl,
 };
 
-struct xattr_handler ext4_xattr_acl_default_handler = {
+const struct xattr_handler ext4_xattr_acl_default_handler = {
 	.prefix	= POSIX_ACL_XATTR_DEFAULT,
 	.flags	= ACL_TYPE_DEFAULT,
 	.list	= ext4_xattr_list_acl_default,
|
|
@@ -979,16 +979,12 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
 		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
 	}
 
-	inode->i_uid = current_fsuid();
-	if (test_opt(sb, GRPID))
+	if (test_opt(sb, GRPID)) {
+		inode->i_mode = mode;
+		inode->i_uid = current_fsuid();
 		inode->i_gid = dir->i_gid;
-	else if (dir->i_mode & S_ISGID) {
-		inode->i_gid = dir->i_gid;
-		if (S_ISDIR(mode))
-			mode |= S_ISGID;
 	} else
-		inode->i_gid = current_fsgid();
-	inode->i_mode = mode;
+		inode_init_owner(inode, dir, mode);
 
 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
 	/* This is the optimal IO size (for stat), not the fs block size */
|
|
@@ -97,7 +97,7 @@ static int ext4_xattr_list(struct dentry *dentry, char *buffer,
 
 static struct mb_cache *ext4_xattr_cache;
 
-static struct xattr_handler *ext4_xattr_handler_map[] = {
+static const struct xattr_handler *ext4_xattr_handler_map[] = {
 	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext4_xattr_acl_access_handler,
@@ -109,7 +109,7 @@ static struct xattr_handler *ext4_xattr_handler_map[] = {
 #endif
 };
 
-struct xattr_handler *ext4_xattr_handlers[] = {
+const struct xattr_handler *ext4_xattr_handlers[] = {
 	&ext4_xattr_user_handler,
 	&ext4_xattr_trusted_handler,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
@@ -122,10 +122,10 @@ struct xattr_handler *ext4_xattr_handlers[] = {
 	NULL
 };
 
-static inline struct xattr_handler *
+static inline const struct xattr_handler *
 ext4_xattr_handler(int name_index)
 {
-	struct xattr_handler *handler = NULL;
+	const struct xattr_handler *handler = NULL;
 
 	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
 		handler = ext4_xattr_handler_map[name_index];
@@ -332,7 +332,7 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
 	size_t rest = buffer_size;
 
 	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
-		struct xattr_handler *handler =
+		const struct xattr_handler *handler =
 			ext4_xattr_handler(entry->e_name_index);
 
 		if (handler) {

@@ -65,11 +65,11 @@ struct ext4_xattr_entry {
 
 # ifdef CONFIG_EXT4_FS_XATTR
 
-extern struct xattr_handler ext4_xattr_user_handler;
-extern struct xattr_handler ext4_xattr_trusted_handler;
-extern struct xattr_handler ext4_xattr_acl_access_handler;
-extern struct xattr_handler ext4_xattr_acl_default_handler;
-extern struct xattr_handler ext4_xattr_security_handler;
+extern const struct xattr_handler ext4_xattr_user_handler;
+extern const struct xattr_handler ext4_xattr_trusted_handler;
+extern const struct xattr_handler ext4_xattr_acl_access_handler;
+extern const struct xattr_handler ext4_xattr_acl_default_handler;
+extern const struct xattr_handler ext4_xattr_security_handler;
 
 extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
 
@@ -86,7 +86,7 @@ extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
 extern int init_ext4_xattr(void);
 extern void exit_ext4_xattr(void);
 
-extern struct xattr_handler *ext4_xattr_handlers[];
+extern const struct xattr_handler *ext4_xattr_handlers[];
 
 # else  /* CONFIG_EXT4_FS_XATTR */
 

@@ -69,7 +69,7 @@ ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir)
 	return err;
 }
 
-struct xattr_handler ext4_xattr_security_handler = {
+const struct xattr_handler ext4_xattr_security_handler = {
 	.prefix	= XATTR_SECURITY_PREFIX,
 	.list	= ext4_xattr_security_list,
 	.get	= ext4_xattr_security_get,

@@ -51,7 +51,7 @@ ext4_xattr_trusted_set(struct dentry *dentry, const char *name,
 			      name, value, size, flags);
 }
 
-struct xattr_handler ext4_xattr_trusted_handler = {
+const struct xattr_handler ext4_xattr_trusted_handler = {
 	.prefix	= XATTR_TRUSTED_PREFIX,
 	.list	= ext4_xattr_trusted_list,
 	.get	= ext4_xattr_trusted_get,

@@ -54,7 +54,7 @@ ext4_xattr_user_set(struct dentry *dentry, const char *name,
 			      name, value, size, flags);
 }
 
-struct xattr_handler ext4_xattr_user_handler = {
+const struct xattr_handler ext4_xattr_user_handler = {
 	.prefix	= XATTR_USER_PREFIX,
 	.list	= ext4_xattr_user_list,
 	.get	= ext4_xattr_user_get,
|
|
@@ -42,10 +42,10 @@ struct wb_writeback_args {
 	long nr_pages;
 	struct super_block *sb;
 	enum writeback_sync_modes sync_mode;
-	int for_kupdate:1;
-	int range_cyclic:1;
-	int for_background:1;
-	int sb_pinned:1;
+	unsigned int for_kupdate:1;
+	unsigned int range_cyclic:1;
+	unsigned int for_background:1;
+	unsigned int sb_pinned:1;
 };
 
 /*
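The switch to unsigned is not cosmetic: with compilers that treat a plain int bit-field as signed, a one-bit field can only hold 0 and -1, so a test such as flag == 1 silently fails. A standalone illustration (ordinary userspace C, not from the kernel tree):

#include <stdio.h>

struct args_signed   { int          for_kupdate:1; };
struct args_unsigned { unsigned int for_kupdate:1; };

int main(void)
{
	struct args_signed   s = { .for_kupdate = 1 };
	struct args_unsigned u = { .for_kupdate = 1 };

	/* typically prints "signed: -1, unsigned: 1" */
	printf("signed: %d, unsigned: %d\n", s.for_kupdate, u.for_kupdate);
	printf("signed == 1? %s\n", s.for_kupdate == 1 ? "yes" : "no");
	return 0;
}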
|
|
@ -201,7 +201,7 @@ generic_check_acl(struct inode *inode, int mask)
|
|||
return -EAGAIN;
|
||||
}
|
||||
|
||||
struct xattr_handler generic_acl_access_handler = {
|
||||
const struct xattr_handler generic_acl_access_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_ACCESS,
|
||||
.flags = ACL_TYPE_ACCESS,
|
||||
.list = generic_acl_list,
|
||||
|
@ -209,7 +209,7 @@ struct xattr_handler generic_acl_access_handler = {
|
|||
.set = generic_acl_set,
|
||||
};
|
||||
|
||||
struct xattr_handler generic_acl_default_handler = {
|
||||
const struct xattr_handler generic_acl_default_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_DEFAULT,
|
||||
.flags = ACL_TYPE_DEFAULT,
|
||||
.list = generic_acl_list,
|
||||
|
|
|
@ -335,7 +335,7 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
|
|||
return error;
|
||||
}
|
||||
|
||||
struct xattr_handler gfs2_xattr_system_handler = {
|
||||
const struct xattr_handler gfs2_xattr_system_handler = {
|
||||
.prefix = XATTR_SYSTEM_PREFIX,
|
||||
.flags = GFS2_EATYPE_SYS,
|
||||
.get = gfs2_xattr_system_get,
|
||||
|
|
|
@ -19,6 +19,6 @@
|
|||
extern int gfs2_check_acl(struct inode *inode, int mask);
|
||||
extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
|
||||
extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
|
||||
extern struct xattr_handler gfs2_xattr_system_handler;
|
||||
extern const struct xattr_handler gfs2_xattr_system_handler;
|
||||
|
||||
#endif /* __ACL_DOT_H__ */
|
||||
|
|
|
@ -54,7 +54,7 @@ extern struct file_system_type gfs2meta_fs_type;
|
|||
extern const struct export_operations gfs2_export_ops;
|
||||
extern const struct super_operations gfs2_super_ops;
|
||||
extern const struct dentry_operations gfs2_dops;
|
||||
extern struct xattr_handler *gfs2_xattr_handlers[];
|
||||
extern const struct xattr_handler *gfs2_xattr_handlers[];
|
||||
|
||||
#endif /* __SUPER_DOT_H__ */
|
||||
|
||||
|
|
|
@ -1535,21 +1535,21 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
|
|||
return error;
|
||||
}
|
||||
|
||||
static struct xattr_handler gfs2_xattr_user_handler = {
|
||||
static const struct xattr_handler gfs2_xattr_user_handler = {
|
||||
.prefix = XATTR_USER_PREFIX,
|
||||
.flags = GFS2_EATYPE_USR,
|
||||
.get = gfs2_xattr_get,
|
||||
.set = gfs2_xattr_set,
|
||||
};
|
||||
|
||||
static struct xattr_handler gfs2_xattr_security_handler = {
|
||||
static const struct xattr_handler gfs2_xattr_security_handler = {
|
||||
.prefix = XATTR_SECURITY_PREFIX,
|
||||
.flags = GFS2_EATYPE_SECURITY,
|
||||
.get = gfs2_xattr_get,
|
||||
.set = gfs2_xattr_set,
|
||||
};
|
||||
|
||||
struct xattr_handler *gfs2_xattr_handlers[] = {
|
||||
const struct xattr_handler *gfs2_xattr_handlers[] = {
|
||||
&gfs2_xattr_user_handler,
|
||||
&gfs2_xattr_security_handler,
|
||||
&gfs2_xattr_system_handler,
|
||||
|
|
26  fs/inode.c
|
@@ -286,11 +286,9 @@ static void init_once(void *foo)
  */
 void __iget(struct inode *inode)
 {
-	if (atomic_read(&inode->i_count)) {
-		atomic_inc(&inode->i_count);
+	if (atomic_inc_return(&inode->i_count) != 1)
 		return;
-	}
-	atomic_inc(&inode->i_count);
 
 	if (!(inode->i_state & (I_DIRTY|I_SYNC)))
 		list_move(&inode->i_list, &inode_in_use);
 	inodes_stat.nr_unused--;
@@ -1608,3 +1606,23 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
 		  inode->i_ino);
 }
 EXPORT_SYMBOL(init_special_inode);
+
+/**
+ * Init uid,gid,mode for new inode according to posix standards
+ * @inode: New inode
+ * @dir: Directory inode
+ * @mode: mode of the new inode
+ */
+void inode_init_owner(struct inode *inode, const struct inode *dir,
+			mode_t mode)
+{
+	inode->i_uid = current_fsuid();
+	if (dir && dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		if (S_ISDIR(mode))
+			mode |= S_ISGID;
+	} else
+		inode->i_gid = current_fsgid();
+	inode->i_mode = mode;
+}
+EXPORT_SYMBOL(inode_init_owner);
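With the helper exported, a filesystem's inode-allocation path reduces to a single call, which is the transformation applied to each filesystem in this series; a minimal sketch for a hypothetical foofs_new_inode() (illustrative only, not part of the commit):

#include <linux/fs.h>

/* Hypothetical allocator; only the ownership setup is the point here. */
static struct inode *foofs_new_inode(struct inode *dir, int mode)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return NULL;

	/*
	 * Replaces the old open-coded block: current_fsuid(), S_ISGID
	 * group inheritance from the parent directory and the S_ISGID
	 * propagation for new directories all happen inside the helper.
	 */
	inode_init_owner(inode, dir, mode);

	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	return inode;
}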
|
|
|
@@ -87,6 +87,8 @@ extern struct file *get_empty_filp(void);
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
+extern void __put_super(struct super_block *sb);
+extern void put_super(struct super_block *sb);
 
 /*
  * open.c
|
|
15  fs/ioctl.c
|
@@ -525,15 +525,8 @@ static int ioctl_fsfreeze(struct file *filp)
 	if (sb->s_op->freeze_fs == NULL)
 		return -EOPNOTSUPP;
 
-	/* If a blockdevice-backed filesystem isn't specified, return. */
-	if (sb->s_bdev == NULL)
-		return -EINVAL;
-
 	/* Freeze */
-	sb = freeze_bdev(sb->s_bdev);
-	if (IS_ERR(sb))
-		return PTR_ERR(sb);
-	return 0;
+	return freeze_super(sb);
 }
 
 static int ioctl_fsthaw(struct file *filp)
@@ -543,12 +536,8 @@ static int ioctl_fsthaw(struct file *filp)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	/* If a blockdevice-backed filesystem isn't specified, return EINVAL. */
-	if (sb->s_bdev == NULL)
-		return -EINVAL;
-
 	/* Thaw */
-	return thaw_bdev(sb->s_bdev, sb);
+	return thaw_super(sb);
 }
 
 /*
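ioctl_fsfreeze() and ioctl_fsthaw() are reached from userspace via the FIFREEZE and FITHAW ioctls; a minimal caller might look like the sketch below (userspace C, illustrative only, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FIFREEZE, FITHAW */

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any object on the filesystem */
	if (fd < 0)
		return 1;

	if (ioctl(fd, FIFREEZE, 0) == 0) {	/* lands in freeze_super() */
		puts("filesystem frozen");
		ioctl(fd, FITHAW, 0);		/* lands in thaw_super() */
	}
	close(fd);
	return 0;
}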
|
|
|
@ -419,7 +419,7 @@ static int jffs2_acl_setxattr(struct dentry *dentry, const char *name,
|
|||
return rc;
|
||||
}
|
||||
|
||||
struct xattr_handler jffs2_acl_access_xattr_handler = {
|
||||
const struct xattr_handler jffs2_acl_access_xattr_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_ACCESS,
|
||||
.flags = ACL_TYPE_DEFAULT,
|
||||
.list = jffs2_acl_access_listxattr,
|
||||
|
@ -427,7 +427,7 @@ struct xattr_handler jffs2_acl_access_xattr_handler = {
|
|||
.set = jffs2_acl_setxattr,
|
||||
};
|
||||
|
||||
struct xattr_handler jffs2_acl_default_xattr_handler = {
|
||||
const struct xattr_handler jffs2_acl_default_xattr_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_DEFAULT,
|
||||
.flags = ACL_TYPE_DEFAULT,
|
||||
.list = jffs2_acl_default_listxattr,
|
||||
|
|
|
@ -31,8 +31,8 @@ extern int jffs2_acl_chmod(struct inode *);
|
|||
extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
|
||||
extern int jffs2_init_acl_post(struct inode *);
|
||||
|
||||
extern struct xattr_handler jffs2_acl_access_xattr_handler;
|
||||
extern struct xattr_handler jffs2_acl_default_xattr_handler;
|
||||
extern const struct xattr_handler jffs2_acl_access_xattr_handler;
|
||||
extern const struct xattr_handler jffs2_acl_default_xattr_handler;
|
||||
|
||||
#else
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ static size_t jffs2_security_listxattr(struct dentry *dentry, char *list,
|
|||
return retlen;
|
||||
}
|
||||
|
||||
struct xattr_handler jffs2_security_xattr_handler = {
|
||||
const struct xattr_handler jffs2_security_xattr_handler = {
|
||||
.prefix = XATTR_SECURITY_PREFIX,
|
||||
.list = jffs2_security_listxattr,
|
||||
.set = jffs2_security_setxattr,
|
||||
|
|
|
@ -904,7 +904,7 @@ struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
|
|||
* do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
|
||||
* is an implementation of setxattr handler on jffs2.
|
||||
* -------------------------------------------------- */
|
||||
struct xattr_handler *jffs2_xattr_handlers[] = {
|
||||
const struct xattr_handler *jffs2_xattr_handlers[] = {
|
||||
&jffs2_user_xattr_handler,
|
||||
#ifdef CONFIG_JFFS2_FS_SECURITY
|
||||
&jffs2_security_xattr_handler,
|
||||
|
@ -917,8 +917,8 @@ struct xattr_handler *jffs2_xattr_handlers[] = {
|
|||
NULL
|
||||
};
|
||||
|
||||
static struct xattr_handler *xprefix_to_handler(int xprefix) {
|
||||
struct xattr_handler *ret;
|
||||
static const struct xattr_handler *xprefix_to_handler(int xprefix) {
|
||||
const struct xattr_handler *ret;
|
||||
|
||||
switch (xprefix) {
|
||||
case JFFS2_XPREFIX_USER:
|
||||
|
@ -955,7 +955,7 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
|
|||
struct jffs2_inode_cache *ic = f->inocache;
|
||||
struct jffs2_xattr_ref *ref, **pref;
|
||||
struct jffs2_xattr_datum *xd;
|
||||
struct xattr_handler *xhandle;
|
||||
const struct xattr_handler *xhandle;
|
||||
ssize_t len, rc;
|
||||
int retry = 0;
|
||||
|
||||
|
|
|
@ -93,9 +93,9 @@ extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname
|
|||
extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
|
||||
const char *buffer, size_t size, int flags);
|
||||
|
||||
extern struct xattr_handler *jffs2_xattr_handlers[];
|
||||
extern struct xattr_handler jffs2_user_xattr_handler;
|
||||
extern struct xattr_handler jffs2_trusted_xattr_handler;
|
||||
extern const struct xattr_handler *jffs2_xattr_handlers[];
|
||||
extern const struct xattr_handler jffs2_user_xattr_handler;
|
||||
extern const struct xattr_handler jffs2_trusted_xattr_handler;
|
||||
|
||||
extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
|
||||
#define jffs2_getxattr generic_getxattr
|
||||
|
@ -122,7 +122,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
|
|||
|
||||
#ifdef CONFIG_JFFS2_FS_SECURITY
|
||||
extern int jffs2_init_security(struct inode *inode, struct inode *dir);
|
||||
extern struct xattr_handler jffs2_security_xattr_handler;
|
||||
extern const struct xattr_handler jffs2_security_xattr_handler;
|
||||
#else
|
||||
#define jffs2_init_security(inode,dir) (0)
|
||||
#endif /* CONFIG_JFFS2_FS_SECURITY */
|
||||
|
|
|
@ -47,7 +47,7 @@ static size_t jffs2_trusted_listxattr(struct dentry *dentry, char *list,
|
|||
return retlen;
|
||||
}
|
||||
|
||||
struct xattr_handler jffs2_trusted_xattr_handler = {
|
||||
const struct xattr_handler jffs2_trusted_xattr_handler = {
|
||||
.prefix = XATTR_TRUSTED_PREFIX,
|
||||
.list = jffs2_trusted_listxattr,
|
||||
.set = jffs2_trusted_setxattr,
|
||||
|
|
|
@ -47,7 +47,7 @@ static size_t jffs2_user_listxattr(struct dentry *dentry, char *list,
|
|||
return retlen;
|
||||
}
|
||||
|
||||
struct xattr_handler jffs2_user_xattr_handler = {
|
||||
const struct xattr_handler jffs2_user_xattr_handler = {
|
||||
.prefix = XATTR_USER_PREFIX,
|
||||
.list = jffs2_user_listxattr,
|
||||
.set = jffs2_user_setxattr,
|
||||
|
|
|
@ -98,14 +98,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
|
|||
goto fail_unlock;
|
||||
}
|
||||
|
||||
inode->i_uid = current_fsuid();
|
||||
if (parent->i_mode & S_ISGID) {
|
||||
inode->i_gid = parent->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
mode |= S_ISGID;
|
||||
} else
|
||||
inode->i_gid = current_fsgid();
|
||||
|
||||
inode_init_owner(inode, parent, mode);
|
||||
/*
|
||||
* New inodes need to save sane values on disk when
|
||||
* uid & gid mount options are used
|
||||
|
@ -121,7 +114,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
|
|||
if (rc)
|
||||
goto fail_drop;
|
||||
|
||||
inode->i_mode = mode;
|
||||
/* inherit flags from parent */
|
||||
jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;
|
||||
|
||||
|
@ -134,7 +126,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
|
|||
if (S_ISLNK(mode))
|
||||
jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
|
||||
}
|
||||
jfs_inode->mode2 |= mode;
|
||||
jfs_inode->mode2 |= inode->i_mode;
|
||||
|
||||
inode->i_blocks = 0;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
||||
|
|
|
@ -358,14 +358,7 @@ struct inode *logfs_new_inode(struct inode *dir, int mode)
|
|||
inode->i_mode = mode;
|
||||
logfs_set_ino_generation(sb, inode);
|
||||
|
||||
inode->i_uid = current_fsuid();
|
||||
inode->i_gid = current_fsgid();
|
||||
if (dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
inode->i_mode |= S_ISGID;
|
||||
}
|
||||
|
||||
inode_init_owner(inode, dir, mode);
|
||||
logfs_inode_setops(inode);
|
||||
insert_inode_hash(inode);
|
||||
|
||||
|
|
|
@ -221,7 +221,7 @@ void minix_free_inode(struct inode * inode)
|
|||
clear_inode(inode); /* clear in-memory copy */
|
||||
}
|
||||
|
||||
struct inode * minix_new_inode(const struct inode * dir, int * error)
|
||||
struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
|
||||
{
|
||||
struct super_block *sb = dir->i_sb;
|
||||
struct minix_sb_info *sbi = minix_sb(sb);
|
||||
|
@ -263,8 +263,7 @@ struct inode * minix_new_inode(const struct inode * dir, int * error)
|
|||
iput(inode);
|
||||
return NULL;
|
||||
}
|
||||
inode->i_uid = current_fsuid();
|
||||
inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current_fsgid();
|
||||
inode_init_owner(inode, dir, mode);
|
||||
inode->i_ino = j;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
|
||||
inode->i_blocks = 0;
|
||||
|
|
|
@ -46,7 +46,7 @@ struct minix_sb_info {
|
|||
extern struct inode *minix_iget(struct super_block *, unsigned long);
|
||||
extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
|
||||
extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
|
||||
extern struct inode * minix_new_inode(const struct inode * dir, int * error);
|
||||
extern struct inode * minix_new_inode(const struct inode *, int, int *);
|
||||
extern void minix_free_inode(struct inode * inode);
|
||||
extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
|
||||
extern int minix_new_block(struct inode * inode);
|
||||
|
|
|
@ -46,10 +46,9 @@ static int minix_mknod(struct inode * dir, struct dentry *dentry, int mode, dev_
|
|||
if (!old_valid_dev(rdev))
|
||||
return -EINVAL;
|
||||
|
||||
inode = minix_new_inode(dir, &error);
|
||||
inode = minix_new_inode(dir, mode, &error);
|
||||
|
||||
if (inode) {
|
||||
inode->i_mode = mode;
|
||||
minix_set_inode(inode, rdev);
|
||||
mark_inode_dirty(inode);
|
||||
error = add_nondir(dentry, inode);
|
||||
|
@ -73,11 +72,10 @@ static int minix_symlink(struct inode * dir, struct dentry *dentry,
|
|||
if (i > dir->i_sb->s_blocksize)
|
||||
goto out;
|
||||
|
||||
inode = minix_new_inode(dir, &err);
|
||||
inode = minix_new_inode(dir, S_IFLNK | 0777, &err);
|
||||
if (!inode)
|
||||
goto out;
|
||||
|
||||
inode->i_mode = S_IFLNK | 0777;
|
||||
minix_set_inode(inode, 0);
|
||||
err = page_symlink(inode, symname, i);
|
||||
if (err)
|
||||
|
@ -117,13 +115,10 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
|
|||
|
||||
inode_inc_link_count(dir);
|
||||
|
||||
inode = minix_new_inode(dir, &err);
|
||||
inode = minix_new_inode(dir, mode, &err);
|
||||
if (!inode)
|
||||
goto out_dir;
|
||||
|
||||
inode->i_mode = S_IFDIR | mode;
|
||||
if (dir->i_mode & S_ISGID)
|
||||
inode->i_mode |= S_ISGID;
|
||||
minix_set_inode(inode, 0);
|
||||
|
||||
inode_inc_link_count(inode);
|
||||
|
|
|
@@ -523,9 +523,10 @@ static void path_put_conditional(struct path *path, struct nameidata *nd)
 static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
 {
 	dput(nd->path.dentry);
-	if (nd->path.mnt != path->mnt)
+	if (nd->path.mnt != path->mnt) {
 		mntput(nd->path.mnt);
-	nd->path.mnt = path->mnt;
+		nd->path.mnt = path->mnt;
+	}
 	nd->path.dentry = path->dentry;
 }
 
|
|
@ -44,8 +44,7 @@
|
|||
#define NFSDDBG_FACILITY NFSDDBG_PROC
|
||||
|
||||
/* Globals */
|
||||
static struct path rec_dir;
|
||||
static int rec_dir_init = 0;
|
||||
static struct file *rec_file;
|
||||
|
||||
static int
|
||||
nfs4_save_creds(const struct cred **original_creds)
|
||||
|
@ -117,33 +116,28 @@ nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
|
|||
return status;
|
||||
}
|
||||
|
||||
static void
|
||||
nfsd4_sync_rec_dir(void)
|
||||
{
|
||||
vfs_fsync(NULL, rec_dir.dentry, 0);
|
||||
}
|
||||
|
||||
int
|
||||
nfsd4_create_clid_dir(struct nfs4_client *clp)
|
||||
{
|
||||
const struct cred *original_cred;
|
||||
char *dname = clp->cl_recdir;
|
||||
struct dentry *dentry;
|
||||
struct dentry *dir, *dentry;
|
||||
int status;
|
||||
|
||||
dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname);
|
||||
|
||||
if (!rec_dir_init || clp->cl_firststate)
|
||||
if (!rec_file || clp->cl_firststate)
|
||||
return 0;
|
||||
|
||||
status = nfs4_save_creds(&original_cred);
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
dir = rec_file->f_path.dentry;
|
||||
/* lock the parent */
|
||||
mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
|
||||
mutex_lock(&dir->d_inode->i_mutex);
|
||||
|
||||
dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
|
||||
dentry = lookup_one_len(dname, dir, HEXDIR_LEN-1);
|
||||
if (IS_ERR(dentry)) {
|
||||
status = PTR_ERR(dentry);
|
||||
goto out_unlock;
|
||||
|
@ -153,18 +147,18 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
|
|||
dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
|
||||
goto out_put;
|
||||
}
|
||||
status = mnt_want_write(rec_dir.mnt);
|
||||
status = mnt_want_write(rec_file->f_path.mnt);
|
||||
if (status)
|
||||
goto out_put;
|
||||
status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU);
|
||||
mnt_drop_write(rec_dir.mnt);
|
||||
status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU);
|
||||
mnt_drop_write(rec_file->f_path.mnt);
|
||||
out_put:
|
||||
dput(dentry);
|
||||
out_unlock:
|
||||
mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
|
||||
mutex_unlock(&dir->d_inode->i_mutex);
|
||||
if (status == 0) {
|
||||
clp->cl_firststate = 1;
|
||||
nfsd4_sync_rec_dir();
|
||||
vfs_fsync(rec_file, 0);
|
||||
}
|
||||
nfs4_reset_creds(original_cred);
|
||||
dprintk("NFSD: nfsd4_create_clid_dir returns %d\n", status);
|
||||
|
@ -206,14 +200,14 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
|
|||
struct dentry *dentry;
|
||||
int status;
|
||||
|
||||
if (!rec_dir_init)
|
||||
if (!rec_file)
|
||||
return 0;
|
||||
|
||||
status = nfs4_save_creds(&original_cred);
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY,
|
||||
filp = dentry_open(dget(dir), mntget(rec_file->f_path.mnt), O_RDONLY,
|
||||
current_cred());
|
||||
status = PTR_ERR(filp);
|
||||
if (IS_ERR(filp))
|
||||
|
@ -250,13 +244,14 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
|
|||
static int
|
||||
nfsd4_unlink_clid_dir(char *name, int namlen)
|
||||
{
|
||||
struct dentry *dentry;
|
||||
struct dentry *dir, *dentry;
|
||||
int status;
|
||||
|
||||
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
|
||||
|
||||
mutex_lock_nested(&rec_dir.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
|
||||
dentry = lookup_one_len(name, rec_dir.dentry, namlen);
|
||||
dir = rec_file->f_path.dentry;
|
||||
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
|
||||
dentry = lookup_one_len(name, dir, namlen);
|
||||
if (IS_ERR(dentry)) {
|
||||
status = PTR_ERR(dentry);
|
||||
goto out_unlock;
|
||||
|
@ -264,11 +259,11 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
|
|||
status = -ENOENT;
|
||||
if (!dentry->d_inode)
|
||||
goto out;
|
||||
status = vfs_rmdir(rec_dir.dentry->d_inode, dentry);
|
||||
status = vfs_rmdir(dir->d_inode, dentry);
|
||||
out:
|
||||
dput(dentry);
|
||||
out_unlock:
|
||||
mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
|
||||
mutex_unlock(&dir->d_inode->i_mutex);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -278,10 +273,10 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
|
|||
const struct cred *original_cred;
|
||||
int status;
|
||||
|
||||
if (!rec_dir_init || !clp->cl_firststate)
|
||||
if (!rec_file || !clp->cl_firststate)
|
||||
return;
|
||||
|
||||
status = mnt_want_write(rec_dir.mnt);
|
||||
status = mnt_want_write(rec_file->f_path.mnt);
|
||||
if (status)
|
||||
goto out;
|
||||
clp->cl_firststate = 0;
|
||||
|
@ -293,8 +288,8 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
|
|||
status = nfsd4_unlink_clid_dir(clp->cl_recdir, HEXDIR_LEN-1);
|
||||
nfs4_reset_creds(original_cred);
|
||||
if (status == 0)
|
||||
nfsd4_sync_rec_dir();
|
||||
mnt_drop_write(rec_dir.mnt);
|
||||
vfs_fsync(rec_file, 0);
|
||||
mnt_drop_write(rec_file->f_path.mnt);
|
||||
out:
|
||||
if (status)
|
||||
printk("NFSD: Failed to remove expired client state directory"
|
||||
|
@ -323,19 +318,19 @@ void
|
|||
nfsd4_recdir_purge_old(void) {
|
||||
int status;
|
||||
|
||||
if (!rec_dir_init)
|
||||
if (!rec_file)
|
||||
return;
|
||||
status = mnt_want_write(rec_dir.mnt);
|
||||
status = mnt_want_write(rec_file->f_path.mnt);
|
||||
if (status)
|
||||
goto out;
|
||||
status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
|
||||
status = nfsd4_list_rec_dir(rec_file->f_path.dentry, purge_old);
|
||||
if (status == 0)
|
||||
nfsd4_sync_rec_dir();
|
||||
mnt_drop_write(rec_dir.mnt);
|
||||
vfs_fsync(rec_file, 0);
|
||||
mnt_drop_write(rec_file->f_path.mnt);
|
||||
out:
|
||||
if (status)
|
||||
printk("nfsd4: failed to purge old clients from recovery"
|
||||
" directory %s\n", rec_dir.dentry->d_name.name);
|
||||
" directory %s\n", rec_file->f_path.dentry->d_name.name);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -355,10 +350,13 @@ int
|
|||
nfsd4_recdir_load(void) {
|
||||
int status;
|
||||
|
||||
status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
|
||||
if (!rec_file)
|
||||
return 0;
|
||||
|
||||
status = nfsd4_list_rec_dir(rec_file->f_path.dentry, load_recdir);
|
||||
if (status)
|
||||
printk("nfsd4: failed loading clients from recovery"
|
||||
" directory %s\n", rec_dir.dentry->d_name.name);
|
||||
" directory %s\n", rec_file->f_path.dentry->d_name.name);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -375,7 +373,7 @@ nfsd4_init_recdir(char *rec_dirname)
|
|||
printk("NFSD: Using %s as the NFSv4 state recovery directory\n",
|
||||
rec_dirname);
|
||||
|
||||
BUG_ON(rec_dir_init);
|
||||
BUG_ON(rec_file);
|
||||
|
||||
status = nfs4_save_creds(&original_cred);
|
||||
if (status < 0) {
|
||||
|
@ -385,22 +383,21 @@ nfsd4_init_recdir(char *rec_dirname)
|
|||
return;
|
||||
}
|
||||
|
||||
status = kern_path(rec_dirname, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
|
||||
&rec_dir);
|
||||
if (status)
|
||||
rec_file = filp_open(rec_dirname, O_RDONLY | O_DIRECTORY, 0);
|
||||
if (IS_ERR(rec_file)) {
|
||||
printk("NFSD: unable to find recovery directory %s\n",
|
||||
rec_dirname);
|
||||
rec_file = NULL;
|
||||
}
|
||||
|
||||
if (!status)
|
||||
rec_dir_init = 1;
|
||||
nfs4_reset_creds(original_cred);
|
||||
}
|
||||
|
||||
void
|
||||
nfsd4_shutdown_recdir(void)
|
||||
{
|
||||
if (!rec_dir_init)
|
||||
if (!rec_file)
|
||||
return;
|
||||
rec_dir_init = 0;
|
||||
path_put(&rec_dir);
|
||||
fput(rec_file);
|
||||
rec_file = NULL;
|
||||
}
|
||||
|
|
|
@ -999,7 +999,7 @@ static int wait_for_concurrent_writes(struct file *file)
|
|||
|
||||
if (inode->i_state & I_DIRTY) {
|
||||
dprintk("nfsd: write sync %d\n", task_pid_nr(current));
|
||||
err = vfs_fsync(file, file->f_path.dentry, 0);
|
||||
err = vfs_fsync(file, 0);
|
||||
}
|
||||
last_ino = inode->i_ino;
|
||||
last_dev = inode->i_sb->s_dev;
|
||||
|
@ -1175,8 +1175,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
|
|||
if (err)
|
||||
goto out;
|
||||
if (EX_ISSYNC(fhp->fh_export)) {
|
||||
int err2 = vfs_fsync_range(file, file->f_path.dentry,
|
||||
offset, end, 0);
|
||||
int err2 = vfs_fsync_range(file, offset, end, 0);
|
||||
|
||||
if (err2 != -EINVAL)
|
||||
err = nfserrno(err2);
|
||||
|
|
|
@ -280,16 +280,7 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
|
|||
/* reference count of i_bh inherits from nilfs_mdt_read_block() */
|
||||
|
||||
atomic_inc(&sbi->s_inodes_count);
|
||||
|
||||
inode->i_uid = current_fsuid();
|
||||
if (dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
mode |= S_ISGID;
|
||||
} else
|
||||
inode->i_gid = current_fsgid();
|
||||
|
||||
inode->i_mode = mode;
|
||||
inode_init_owner(inode, dir, mode);
|
||||
inode->i_ino = ino;
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
|
||||
|
||||
|
|
|
@ -110,14 +110,10 @@ EXPORT_SYMBOL_GPL(get_inotify_watch);
|
|||
int pin_inotify_watch(struct inotify_watch *watch)
|
||||
{
|
||||
struct super_block *sb = watch->inode->i_sb;
|
||||
spin_lock(&sb_lock);
|
||||
if (sb->s_count >= S_BIAS) {
|
||||
atomic_inc(&sb->s_active);
|
||||
spin_unlock(&sb_lock);
|
||||
if (atomic_inc_not_zero(&sb->s_active)) {
|
||||
atomic_inc(&watch->count);
|
||||
return 1;
|
||||
}
|
||||
spin_unlock(&sb_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -515,34 +511,8 @@ EXPORT_SYMBOL_GPL(inotify_init_watch);
|
|||
* done. Cleanup is just deactivate_super(). However, that leaves a messy
|
||||
* case - what if we *are* racing with umount() and active references to
|
||||
* superblock can't be acquired anymore? We can bump ->s_count, grab
|
||||
* ->s_umount, which will almost certainly wait until the superblock is shut
|
||||
* down and the watch in question is pining for fjords. That's fine, but
|
||||
* there is a problem - we might have hit the window between ->s_active
|
||||
* getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
|
||||
* is past the point of no return and is heading for shutdown) and the
|
||||
* moment when deactivate_super() acquires ->s_umount. We could just do
|
||||
* drop_super() yield() and retry, but that's rather antisocial and this
|
||||
* stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
|
||||
* found that we'd got there first (i.e. that ->s_root is non-NULL) we know
|
||||
* that we won't race with inotify_umount_inodes(). So we could grab a
|
||||
* reference to watch and do the rest as above, just with drop_super() instead
|
||||
* of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
|
||||
* could grab ->s_umount. So the watch could've been gone already.
|
||||
*
|
||||
* That still can be dealt with - we need to save watch->wd, do idr_find()
|
||||
* and compare its result with our pointer. If they match, we either have
|
||||
* the damn thing still alive or we'd lost not one but two races at once,
|
||||
* the watch had been killed and a new one got created with the same ->wd
|
||||
* at the same address. That couldn't have happened in inotify_destroy(),
|
||||
* but inotify_rm_wd() could run into that. Still, "new one got created"
|
||||
* is not a problem - we have every right to kill it or leave it alone,
|
||||
* whatever's more convenient.
|
||||
*
|
||||
* So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
|
||||
* "grab it and kill it" check. If it's been our original watch, we are
|
||||
* fine, if it's a newcomer - nevermind, just pretend that we'd won the
|
||||
* race and kill the fscker anyway; we are safe since we know that its
|
||||
* superblock won't be going away.
|
||||
* ->s_umount, which will wait until the superblock is shut down and the
|
||||
* watch in question is pining for fjords.
|
||||
*
|
||||
* And yes, this is far beyond mere "not very pretty"; so's the entire
|
||||
* concept of inotify to start with.
|
||||
|
@ -556,57 +526,31 @@ EXPORT_SYMBOL_GPL(inotify_init_watch);
|
|||
* Called with ih->mutex held, drops it. Possible return values:
|
||||
* 0 - nothing to do, it has died
|
||||
* 1 - remove it, drop the reference and deactivate_super()
|
||||
* 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
|
||||
* that variant, since it involved a lot of PITA, but that's the best that
|
||||
* could've been done.
|
||||
*/
|
||||
static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
|
||||
{
|
||||
struct super_block *sb = watch->inode->i_sb;
|
||||
s32 wd = watch->wd;
|
||||
|
||||
spin_lock(&sb_lock);
|
||||
if (sb->s_count >= S_BIAS) {
|
||||
atomic_inc(&sb->s_active);
|
||||
spin_unlock(&sb_lock);
|
||||
if (atomic_inc_not_zero(&sb->s_active)) {
|
||||
get_inotify_watch(watch);
|
||||
mutex_unlock(&ih->mutex);
|
||||
return 1; /* the best outcome */
|
||||
}
|
||||
spin_lock(&sb_lock);
|
||||
sb->s_count++;
|
||||
spin_unlock(&sb_lock);
|
||||
mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
|
||||
down_read(&sb->s_umount);
|
||||
if (likely(!sb->s_root)) {
|
||||
/* fs is already shut down; the watch is dead */
|
||||
drop_super(sb);
|
||||
return 0;
|
||||
}
|
||||
/* raced with the final deactivate_super() */
|
||||
mutex_lock(&ih->mutex);
|
||||
if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
|
||||
/* the watch is dead */
|
||||
mutex_unlock(&ih->mutex);
|
||||
drop_super(sb);
|
||||
return 0;
|
||||
}
|
||||
/* still alive or freed and reused with the same sb and wd; kill */
|
||||
get_inotify_watch(watch);
|
||||
mutex_unlock(&ih->mutex);
|
||||
return 2;
|
||||
/* fs is already shut down; the watch is dead */
|
||||
drop_super(sb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void unpin_and_kill(struct inotify_watch *watch, int how)
|
||||
static void unpin_and_kill(struct inotify_watch *watch)
|
||||
{
|
||||
struct super_block *sb = watch->inode->i_sb;
|
||||
put_inotify_watch(watch);
|
||||
switch (how) {
|
||||
case 1:
|
||||
deactivate_super(sb);
|
||||
break;
|
||||
case 2:
|
||||
drop_super(sb);
|
||||
}
|
||||
deactivate_super(sb);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -628,7 +572,6 @@ void inotify_destroy(struct inotify_handle *ih)
|
|||
struct list_head *watches;
|
||||
struct super_block *sb;
|
||||
struct inode *inode;
|
||||
int how;
|
||||
|
||||
mutex_lock(&ih->mutex);
|
||||
watches = &ih->watches;
|
||||
|
@ -638,8 +581,7 @@ void inotify_destroy(struct inotify_handle *ih)
|
|||
}
|
||||
watch = list_first_entry(watches, struct inotify_watch, h_list);
|
||||
sb = watch->inode->i_sb;
|
||||
how = pin_to_kill(ih, watch);
|
||||
if (!how)
|
||||
if (!pin_to_kill(ih, watch))
|
||||
continue;
|
||||
|
||||
inode = watch->inode;
|
||||
|
@ -654,7 +596,7 @@ void inotify_destroy(struct inotify_handle *ih)
|
|||
|
||||
mutex_unlock(&ih->mutex);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
unpin_and_kill(watch, how);
|
||||
unpin_and_kill(watch);
|
||||
}
|
||||
|
||||
/* free this handle: the put matching the get in inotify_init() */
|
||||
|
@ -857,7 +799,6 @@ int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
|
|||
struct inotify_watch *watch;
|
||||
struct super_block *sb;
|
||||
struct inode *inode;
|
||||
int how;
|
||||
|
||||
mutex_lock(&ih->mutex);
|
||||
watch = idr_find(&ih->idr, wd);
|
||||
|
@ -866,8 +807,7 @@ int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
|
|||
return -EINVAL;
|
||||
}
|
||||
sb = watch->inode->i_sb;
|
||||
how = pin_to_kill(ih, watch);
|
||||
if (!how)
|
||||
if (!pin_to_kill(ih, watch))
|
||||
return 0;
|
||||
|
||||
inode = watch->inode;
|
||||
|
@ -881,7 +821,7 @@ int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
|
|||
|
||||
mutex_unlock(&ih->mutex);
|
||||
mutex_unlock(&inode->inotify_mutex);
|
||||
unpin_and_kill(watch, how);
|
||||
unpin_and_kill(watch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -489,7 +489,7 @@ static int ocfs2_xattr_set_acl(struct dentry *dentry, const char *name,
|
|||
return ret;
|
||||
}
|
||||
|
||||
struct xattr_handler ocfs2_xattr_acl_access_handler = {
|
||||
const struct xattr_handler ocfs2_xattr_acl_access_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_ACCESS,
|
||||
.flags = ACL_TYPE_ACCESS,
|
||||
.list = ocfs2_xattr_list_acl_access,
|
||||
|
@ -497,7 +497,7 @@ struct xattr_handler ocfs2_xattr_acl_access_handler = {
|
|||
.set = ocfs2_xattr_set_acl,
|
||||
};
|
||||
|
||||
struct xattr_handler ocfs2_xattr_acl_default_handler = {
|
||||
const struct xattr_handler ocfs2_xattr_acl_default_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_DEFAULT,
|
||||
.flags = ACL_TYPE_DEFAULT,
|
||||
.list = ocfs2_xattr_list_acl_default,
|
||||
|
|
|
@ -204,14 +204,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode)
|
|||
inode->i_nlink = 2;
|
||||
else
|
||||
inode->i_nlink = 1;
|
||||
inode->i_uid = current_fsuid();
|
||||
if (dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
mode |= S_ISGID;
|
||||
} else
|
||||
inode->i_gid = current_fsgid();
|
||||
inode->i_mode = mode;
|
||||
inode_init_owner(inode, dir, mode);
|
||||
dquot_initialize(inode);
|
||||
return inode;
|
||||
}
|
||||
|
|
|
@ -97,7 +97,7 @@ static struct ocfs2_xattr_def_value_root def_xv = {
|
|||
.xv.xr_list.l_count = cpu_to_le16(1),
|
||||
};
|
||||
|
||||
struct xattr_handler *ocfs2_xattr_handlers[] = {
|
||||
const struct xattr_handler *ocfs2_xattr_handlers[] = {
|
||||
&ocfs2_xattr_user_handler,
|
||||
&ocfs2_xattr_acl_access_handler,
|
||||
&ocfs2_xattr_acl_default_handler,
|
||||
|
@ -106,7 +106,7 @@ struct xattr_handler *ocfs2_xattr_handlers[] = {
|
|||
NULL
|
||||
};
|
||||
|
||||
static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
|
||||
static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
|
||||
[OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler,
|
||||
[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS]
|
||||
= &ocfs2_xattr_acl_access_handler,
|
||||
|
@ -540,7 +540,7 @@ static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno,
|
|||
|
||||
static inline const char *ocfs2_xattr_prefix(int name_index)
|
||||
{
|
||||
struct xattr_handler *handler = NULL;
|
||||
const struct xattr_handler *handler = NULL;
|
||||
|
||||
if (name_index > 0 && name_index < OCFS2_XATTR_MAX)
|
||||
handler = ocfs2_xattr_handler_map[name_index];
|
||||
|
@ -7213,7 +7213,7 @@ int ocfs2_init_security_set(handle_t *handle,
|
|||
xattr_ac, data_ac);
|
||||
}
|
||||
|
||||
struct xattr_handler ocfs2_xattr_security_handler = {
|
||||
const struct xattr_handler ocfs2_xattr_security_handler = {
|
||||
.prefix = XATTR_SECURITY_PREFIX,
|
||||
.list = ocfs2_xattr_security_list,
|
||||
.get = ocfs2_xattr_security_get,
|
||||
|
@ -7257,7 +7257,7 @@ static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
|
|||
name, value, size, flags);
|
||||
}
|
||||
|
||||
struct xattr_handler ocfs2_xattr_trusted_handler = {
|
||||
const struct xattr_handler ocfs2_xattr_trusted_handler = {
|
||||
.prefix = XATTR_TRUSTED_PREFIX,
|
||||
.list = ocfs2_xattr_trusted_list,
|
||||
.get = ocfs2_xattr_trusted_get,
|
||||
|
@ -7313,7 +7313,7 @@ static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
|
|||
name, value, size, flags);
|
||||
}
|
||||
|
||||
struct xattr_handler ocfs2_xattr_user_handler = {
|
||||
const struct xattr_handler ocfs2_xattr_user_handler = {
|
||||
.prefix = XATTR_USER_PREFIX,
|
||||
.list = ocfs2_xattr_user_list,
|
||||
.get = ocfs2_xattr_user_get,
|
||||
|
|
|
@ -37,12 +37,12 @@ struct ocfs2_security_xattr_info {
|
|||
size_t value_len;
|
||||
};
|
||||
|
||||
extern struct xattr_handler ocfs2_xattr_user_handler;
|
||||
extern struct xattr_handler ocfs2_xattr_trusted_handler;
|
||||
extern struct xattr_handler ocfs2_xattr_security_handler;
|
||||
extern struct xattr_handler ocfs2_xattr_acl_access_handler;
|
||||
extern struct xattr_handler ocfs2_xattr_acl_default_handler;
|
||||
extern struct xattr_handler *ocfs2_xattr_handlers[];
|
||||
extern const struct xattr_handler ocfs2_xattr_user_handler;
|
||||
extern const struct xattr_handler ocfs2_xattr_trusted_handler;
|
||||
extern const struct xattr_handler ocfs2_xattr_security_handler;
|
||||
extern const struct xattr_handler ocfs2_xattr_acl_access_handler;
|
||||
extern const struct xattr_handler ocfs2_xattr_acl_default_handler;
|
||||
extern const struct xattr_handler *ocfs2_xattr_handlers[];
|
||||
|
||||
ssize_t ocfs2_listxattr(struct dentry *, char *, size_t);
|
||||
int ocfs2_xattr_get_nolock(struct inode *, struct buffer_head *, int,
|
||||
|
|
|
@ -37,9 +37,7 @@ struct inode *omfs_new_inode(struct inode *dir, int mode)
|
|||
goto fail;
|
||||
|
||||
inode->i_ino = new_block;
|
||||
inode->i_mode = mode;
|
||||
inode->i_uid = current_fsuid();
|
||||
inode->i_gid = current_fsgid();
|
||||
inode_init_owner(inode, NULL, mode);
|
||||
inode->i_mapping->a_ops = &omfs_aops;
|
||||
|
||||
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
|
||||
|
|
166  fs/open.c
|
@ -17,7 +17,6 @@
|
|||
#include <linux/securebits.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/mount.h>
|
||||
#include <linux/vfs.h>
|
||||
#include <linux/fcntl.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
@ -33,171 +32,6 @@
|
|||
|
||||
#include "internal.h"
|
||||
|
||||
int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
|
||||
{
|
||||
int retval = -ENODEV;
|
||||
|
||||
if (dentry) {
|
||||
retval = -ENOSYS;
|
||||
if (dentry->d_sb->s_op->statfs) {
|
||||
memset(buf, 0, sizeof(*buf));
|
||||
retval = security_sb_statfs(dentry);
|
||||
if (retval)
|
||||
return retval;
|
||||
retval = dentry->d_sb->s_op->statfs(dentry, buf);
|
||||
if (retval == 0 && buf->f_frsize == 0)
|
||||
buf->f_frsize = buf->f_bsize;
|
||||
}
|
||||
}
|
||||
return retval;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(vfs_statfs);
|
||||
|
||||
static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
|
||||
{
|
||||
struct kstatfs st;
|
||||
int retval;
|
||||
|
||||
retval = vfs_statfs(dentry, &st);
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
if (sizeof(*buf) == sizeof(st))
|
||||
memcpy(buf, &st, sizeof(st));
|
||||
else {
|
||||
if (sizeof buf->f_blocks == 4) {
|
||||
if ((st.f_blocks | st.f_bfree | st.f_bavail |
|
||||
st.f_bsize | st.f_frsize) &
|
||||
0xffffffff00000000ULL)
|
||||
return -EOVERFLOW;
|
||||
/*
|
||||
* f_files and f_ffree may be -1; it's okay to stuff
|
||||
* that into 32 bits
|
||||
*/
|
||||
if (st.f_files != -1 &&
|
||||
(st.f_files & 0xffffffff00000000ULL))
|
||||
return -EOVERFLOW;
|
||||
if (st.f_ffree != -1 &&
|
||||
(st.f_ffree & 0xffffffff00000000ULL))
|
||||
return -EOVERFLOW;
|
||||
}
|
||||
|
||||
buf->f_type = st.f_type;
|
||||
buf->f_bsize = st.f_bsize;
|
||||
buf->f_blocks = st.f_blocks;
|
||||
buf->f_bfree = st.f_bfree;
|
||||
buf->f_bavail = st.f_bavail;
|
||||
buf->f_files = st.f_files;
|
||||
buf->f_ffree = st.f_ffree;
|
||||
buf->f_fsid = st.f_fsid;
|
||||
buf->f_namelen = st.f_namelen;
|
||||
buf->f_frsize = st.f_frsize;
|
||||
memset(buf->f_spare, 0, sizeof(buf->f_spare));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
|
||||
{
|
||||
struct kstatfs st;
|
||||
int retval;
|
||||
|
||||
retval = vfs_statfs(dentry, &st);
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
if (sizeof(*buf) == sizeof(st))
|
||||
memcpy(buf, &st, sizeof(st));
|
||||
else {
|
||||
buf->f_type = st.f_type;
|
||||
buf->f_bsize = st.f_bsize;
|
||||
buf->f_blocks = st.f_blocks;
|
||||
buf->f_bfree = st.f_bfree;
|
||||
buf->f_bavail = st.f_bavail;
|
||||
buf->f_files = st.f_files;
|
||||
buf->f_ffree = st.f_ffree;
|
||||
buf->f_fsid = st.f_fsid;
|
||||
buf->f_namelen = st.f_namelen;
|
||||
buf->f_frsize = st.f_frsize;
|
||||
memset(buf->f_spare, 0, sizeof(buf->f_spare));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf)
|
||||
{
|
||||
struct path path;
|
||||
int error;
|
||||
|
||||
error = user_path(pathname, &path);
|
||||
if (!error) {
|
||||
struct statfs tmp;
|
||||
error = vfs_statfs_native(path.dentry, &tmp);
|
||||
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
|
||||
error = -EFAULT;
|
||||
path_put(&path);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct statfs64 __user *, buf)
|
||||
{
|
||||
struct path path;
|
||||
long error;
|
||||
|
||||
if (sz != sizeof(*buf))
|
||||
return -EINVAL;
|
||||
error = user_path(pathname, &path);
|
||||
if (!error) {
|
||||
struct statfs64 tmp;
|
||||
error = vfs_statfs64(path.dentry, &tmp);
|
||||
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
|
||||
error = -EFAULT;
|
||||
path_put(&path);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
|
||||
{
|
||||
struct file * file;
|
||||
struct statfs tmp;
|
||||
int error;
|
||||
|
||||
error = -EBADF;
|
||||
file = fget(fd);
|
||||
if (!file)
|
||||
goto out;
|
||||
error = vfs_statfs_native(file->f_path.dentry, &tmp);
|
||||
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
|
||||
error = -EFAULT;
|
||||
fput(file);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf)
|
||||
{
|
||||
struct file * file;
|
||||
struct statfs64 tmp;
|
||||
int error;
|
||||
|
||||
if (sz != sizeof(*buf))
|
||||
return -EINVAL;
|
||||
|
||||
error = -EBADF;
|
||||
file = fget(fd);
|
||||
if (!file)
|
||||
goto out;
|
||||
error = vfs_statfs64(file->f_path.dentry, &tmp);
|
||||
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
|
||||
error = -EFAULT;
|
||||
fput(file);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
|
||||
struct file *filp)
|
||||
{
|
||||
|
|
|
@ -626,7 +626,7 @@ int efi_partition(struct parsed_partitions *state)
|
|||
/* If this is a RAID volume, tell md */
|
||||
if (!efi_guidcmp(ptes[i].partition_type_guid,
|
||||
PARTITION_LINUX_RAID_GUID))
|
||||
state->parts[i+1].flags = 1;
|
||||
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
|
||||
}
|
||||
kfree(ptes);
|
||||
kfree(gpt);
|
||||
|
|
|
@ -75,7 +75,7 @@ int mac_partition(struct parsed_partitions *state)
|
|||
be32_to_cpu(part->block_count) * (secsize/512));
|
||||
|
||||
if (!strnicmp(part->type, "Linux_RAID", 10))
|
||||
state->parts[slot].flags = 1;
|
||||
state->parts[slot].flags = ADDPART_FLAG_RAID;
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
/*
|
||||
* If this is the first bootable partition, tell the
|
||||
|
|
|
@ -498,7 +498,7 @@ int msdos_partition(struct parsed_partitions *state)
|
|||
}
|
||||
put_partition(state, slot, start, size);
|
||||
if (SYS_IND(p) == LINUX_RAID_PARTITION)
|
||||
state->parts[slot].flags = 1;
|
||||
state->parts[slot].flags = ADDPART_FLAG_RAID;
|
||||
if (SYS_IND(p) == DM6_PARTITION)
|
||||
printk("[DM]");
|
||||
if (SYS_IND(p) == EZD_PARTITION)
|
||||
|
|
|
@ -45,36 +45,22 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
|
|||
return security_quotactl(cmd, type, id, sb);
|
||||
}
|
||||
|
||||
static void quota_sync_one(struct super_block *sb, void *arg)
|
||||
{
|
||||
if (sb->s_qcop && sb->s_qcop->quota_sync)
|
||||
sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
|
||||
}
|
||||
|
||||
static int quota_sync_all(int type)
|
||||
{
|
||||
struct super_block *sb;
|
||||
int ret;
|
||||
|
||||
if (type >= MAXQUOTAS)
|
||||
return -EINVAL;
|
||||
ret = security_quotactl(Q_SYNC, type, 0, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
spin_lock(&sb_lock);
|
||||
restart:
|
||||
list_for_each_entry(sb, &super_blocks, s_list) {
|
||||
if (!sb->s_qcop || !sb->s_qcop->quota_sync)
|
||||
continue;
|
||||
|
||||
sb->s_count++;
|
||||
spin_unlock(&sb_lock);
|
||||
down_read(&sb->s_umount);
|
||||
if (sb->s_root)
|
||||
sb->s_qcop->quota_sync(sb, type, 1);
|
||||
up_read(&sb->s_umount);
|
||||
spin_lock(&sb_lock);
|
||||
if (__put_super_and_need_restart(sb))
|
||||
goto restart;
|
||||
}
|
||||
spin_unlock(&sb_lock);
|
||||
|
||||
return 0;
|
||||
if (!ret)
|
||||
iterate_supers(quota_sync_one, &type);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
|
||||
|
|
|
@ -52,14 +52,13 @@ static struct backing_dev_info ramfs_backing_dev_info = {
|
|||
BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
|
||||
};
|
||||
|
||||
struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
|
||||
struct inode *ramfs_get_inode(struct super_block *sb,
|
||||
const struct inode *dir, int mode, dev_t dev)
|
||||
{
|
||||
struct inode * inode = new_inode(sb);
|
||||
|
||||
if (inode) {
|
||||
inode->i_mode = mode;
|
||||
inode->i_uid = current_fsuid();
|
||||
inode->i_gid = current_fsgid();
|
||||
inode_init_owner(inode, dir, mode);
|
||||
inode->i_mapping->a_ops = &ramfs_aops;
|
||||
inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
|
||||
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
|
||||
|
@ -95,15 +94,10 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
|
|||
static int
|
||||
ramfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
|
||||
{
|
||||
struct inode * inode = ramfs_get_inode(dir->i_sb, mode, dev);
|
||||
struct inode * inode = ramfs_get_inode(dir->i_sb, dir, mode, dev);
|
||||
int error = -ENOSPC;
|
||||
|
||||
if (inode) {
|
||||
if (dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
inode->i_mode |= S_ISGID;
|
||||
}
|
||||
d_instantiate(dentry, inode);
|
||||
dget(dentry); /* Extra count - pin the dentry in core */
|
||||
error = 0;
|
||||
|
@ -130,13 +124,11 @@ static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char *
|
|||
struct inode *inode;
|
||||
int error = -ENOSPC;
|
||||
|
||||
inode = ramfs_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
|
||||
inode = ramfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
|
||||
if (inode) {
|
||||
int l = strlen(symname)+1;
|
||||
error = page_symlink(inode, symname, l);
|
||||
if (!error) {
|
||||
if (dir->i_mode & S_ISGID)
|
||||
inode->i_gid = dir->i_gid;
|
||||
d_instantiate(dentry, inode);
|
||||
dget(dentry);
|
||||
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
|
||||
|
@ -241,7 +233,7 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent)
|
|||
sb->s_op = &ramfs_ops;
|
||||
sb->s_time_gran = 1;
|
||||
|
||||
inode = ramfs_get_inode(sb, S_IFDIR | fsi->mount_opts.mode, 0);
|
||||
inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0);
|
||||
if (!inode) {
|
||||
err = -ENOMEM;
|
||||
goto fail;
|
||||
|
|
|
@ -561,23 +561,13 @@ static int drop_new_inode(struct inode *inode)
|
|||
*/
|
||||
static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
|
||||
{
|
||||
|
||||
/* the quota init calls have to know who to charge the quota to, so
|
||||
** we have to set uid and gid here
|
||||
*/
|
||||
inode->i_uid = current_fsuid();
|
||||
inode->i_mode = mode;
|
||||
/* Make inode invalid - just in case we are going to drop it before
|
||||
* the initialization happens */
|
||||
INODE_PKEY(inode)->k_objectid = 0;
|
||||
|
||||
if (dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
if (S_ISDIR(mode))
|
||||
inode->i_mode |= S_ISGID;
|
||||
} else {
|
||||
inode->i_gid = current_fsgid();
|
||||
}
|
||||
/* the quota init calls have to know who to charge the quota to, so
|
||||
** we have to set uid and gid here
|
||||
*/
|
||||
inode_init_owner(inode, dir, mode);
|
||||
dquot_initialize(inode);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -723,11 +723,11 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
|
|||
(handler) = *(handlers)++)
|
||||
|
||||
/* This is the implementation for the xattr plugin infrastructure */
|
||||
static inline struct xattr_handler *
|
||||
find_xattr_handler_prefix(struct xattr_handler **handlers,
|
||||
static inline const struct xattr_handler *
|
||||
find_xattr_handler_prefix(const struct xattr_handler **handlers,
|
||||
const char *name)
|
||||
{
|
||||
struct xattr_handler *xah;
|
||||
const struct xattr_handler *xah;
|
||||
|
||||
if (!handlers)
|
||||
return NULL;
|
||||
|
@ -748,7 +748,7 @@ ssize_t
|
|||
reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
|
||||
size_t size)
|
||||
{
|
||||
struct xattr_handler *handler;
|
||||
const struct xattr_handler *handler;
|
||||
|
||||
handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
|
||||
|
||||
|
@ -767,7 +767,7 @@ int
|
|||
reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
||||
size_t size, int flags)
|
||||
{
|
||||
struct xattr_handler *handler;
|
||||
const struct xattr_handler *handler;
|
||||
|
||||
handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
|
||||
|
||||
|
@ -784,7 +784,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
|||
*/
|
||||
int reiserfs_removexattr(struct dentry *dentry, const char *name)
|
||||
{
|
||||
struct xattr_handler *handler;
|
||||
const struct xattr_handler *handler;
|
||||
handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
|
||||
|
||||
if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
|
||||
|
@ -807,7 +807,7 @@ static int listxattr_filler(void *buf, const char *name, int namelen,
|
|||
size_t size;
|
||||
if (name[0] != '.' ||
|
||||
(namelen != 1 && (name[1] != '.' || namelen != 2))) {
|
||||
struct xattr_handler *handler;
|
||||
const struct xattr_handler *handler;
|
||||
handler = find_xattr_handler_prefix(b->dentry->d_sb->s_xattr,
|
||||
name);
|
||||
if (!handler) /* Unsupported xattr name */
|
||||
|
@ -920,7 +920,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
|
|||
#endif
|
||||
|
||||
/* Actual operations that are exported to VFS-land */
|
||||
struct xattr_handler *reiserfs_xattr_handlers[] = {
|
||||
const struct xattr_handler *reiserfs_xattr_handlers[] = {
|
||||
#ifdef CONFIG_REISERFS_FS_XATTR
|
||||
&reiserfs_xattr_user_handler,
|
||||
&reiserfs_xattr_trusted_handler,
|
||||
|
|
|
@ -500,7 +500,7 @@ static size_t posix_acl_access_list(struct dentry *dentry, char *list,
|
|||
return size;
|
||||
}
|
||||
|
||||
struct xattr_handler reiserfs_posix_acl_access_handler = {
|
||||
const struct xattr_handler reiserfs_posix_acl_access_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_ACCESS,
|
||||
.flags = ACL_TYPE_ACCESS,
|
||||
.get = posix_acl_get,
|
||||
|
@ -520,7 +520,7 @@ static size_t posix_acl_default_list(struct dentry *dentry, char *list,
|
|||
return size;
|
||||
}
|
||||
|
||||
struct xattr_handler reiserfs_posix_acl_default_handler = {
|
||||
const struct xattr_handler reiserfs_posix_acl_default_handler = {
|
||||
.prefix = POSIX_ACL_XATTR_DEFAULT,
|
||||
.flags = ACL_TYPE_DEFAULT,
|
||||
.get = posix_acl_get,
|
||||
|
|
|
@ -111,7 +111,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec)
|
|||
sec->value = NULL;
|
||||
}
|
||||
|
||||
struct xattr_handler reiserfs_xattr_security_handler = {
|
||||
const struct xattr_handler reiserfs_xattr_security_handler = {
|
||||
.prefix = XATTR_SECURITY_PREFIX,
|
||||
.get = security_get,
|
||||
.set = security_set,
|
||||
|
|
|
@ -48,7 +48,7 @@ static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size,
|
|||
return len;
|
||||
}
|
||||
|
||||
struct xattr_handler reiserfs_xattr_trusted_handler = {
|
||||
const struct xattr_handler reiserfs_xattr_trusted_handler = {
|
||||
.prefix = XATTR_TRUSTED_PREFIX,
|
||||
.get = trusted_get,
|
||||
.set = trusted_set,
|
||||
|
|
|
@@ -44,7 +44,7 @@ static size_t user_list(struct dentry *dentry, char *list, size_t list_size,
        return len;
}

struct xattr_handler reiserfs_xattr_user_handler = {
const struct xattr_handler reiserfs_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .get = user_get,
        .set = user_set,
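All of the reiserfs hunks above make the same mechanical change: the xattr handler objects and the handler table become const, so they can live in read-only data. For illustration only (not from this commit; the struct and names below are simplified userspace stand-ins rather than the kernel's types), a const handler table with a prefix lookup looks roughly like this:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's xattr handler type. */
struct xattr_handler {
        const char *prefix;
        int (*get)(const char *name, void *buf, size_t size);
};

static int user_get(const char *name, void *buf, size_t size)
{
        printf("get %s\n", name);
        return 0;
}

/* Handler objects and the table itself are const, matching the intent of the
 * change above: the lookup code only ever reads them. */
static const struct xattr_handler user_handler = {
        .prefix = "user.",
        .get = user_get,
};

static const struct xattr_handler *handlers[] = { &user_handler, NULL };

static const struct xattr_handler *find_handler(const char *name)
{
        const struct xattr_handler **h;

        for (h = handlers; *h; h++)
                if (!strncmp(name, (*h)->prefix, strlen((*h)->prefix)))
                        return *h;
        return NULL;
}

int main(void)
{
        const struct xattr_handler *h = find_handler("user.comment");

        if (h)
                h->get("user.comment", NULL, 0);
        return 0;
}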
@@ -0,0 +1,196 @@
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/uaccess.h>

int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        int retval = -ENODEV;

        if (dentry) {
                retval = -ENOSYS;
                if (dentry->d_sb->s_op->statfs) {
                        memset(buf, 0, sizeof(*buf));
                        retval = security_sb_statfs(dentry);
                        if (retval)
                                return retval;
                        retval = dentry->d_sb->s_op->statfs(dentry, buf);
                        if (retval == 0 && buf->f_frsize == 0)
                                buf->f_frsize = buf->f_bsize;
                }
        }
        return retval;
}

EXPORT_SYMBOL(vfs_statfs);

static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
{
        struct kstatfs st;
        int retval;

        retval = vfs_statfs(dentry, &st);
        if (retval)
                return retval;

        if (sizeof(*buf) == sizeof(st))
                memcpy(buf, &st, sizeof(st));
        else {
                if (sizeof buf->f_blocks == 4) {
                        if ((st.f_blocks | st.f_bfree | st.f_bavail |
                             st.f_bsize | st.f_frsize) &
                            0xffffffff00000000ULL)
                                return -EOVERFLOW;
                        /*
                         * f_files and f_ffree may be -1; it's okay to stuff
                         * that into 32 bits
                         */
                        if (st.f_files != -1 &&
                            (st.f_files & 0xffffffff00000000ULL))
                                return -EOVERFLOW;
                        if (st.f_ffree != -1 &&
                            (st.f_ffree & 0xffffffff00000000ULL))
                                return -EOVERFLOW;
                }

                buf->f_type = st.f_type;
                buf->f_bsize = st.f_bsize;
                buf->f_blocks = st.f_blocks;
                buf->f_bfree = st.f_bfree;
                buf->f_bavail = st.f_bavail;
                buf->f_files = st.f_files;
                buf->f_ffree = st.f_ffree;
                buf->f_fsid = st.f_fsid;
                buf->f_namelen = st.f_namelen;
                buf->f_frsize = st.f_frsize;
                memset(buf->f_spare, 0, sizeof(buf->f_spare));
        }
        return 0;
}

static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
{
        struct kstatfs st;
        int retval;

        retval = vfs_statfs(dentry, &st);
        if (retval)
                return retval;

        if (sizeof(*buf) == sizeof(st))
                memcpy(buf, &st, sizeof(st));
        else {
                buf->f_type = st.f_type;
                buf->f_bsize = st.f_bsize;
                buf->f_blocks = st.f_blocks;
                buf->f_bfree = st.f_bfree;
                buf->f_bavail = st.f_bavail;
                buf->f_files = st.f_files;
                buf->f_ffree = st.f_ffree;
                buf->f_fsid = st.f_fsid;
                buf->f_namelen = st.f_namelen;
                buf->f_frsize = st.f_frsize;
                memset(buf->f_spare, 0, sizeof(buf->f_spare));
        }
        return 0;
}

SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf)
{
        struct path path;
        int error;

        error = user_path(pathname, &path);
        if (!error) {
                struct statfs tmp;
                error = vfs_statfs_native(path.dentry, &tmp);
                if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
                        error = -EFAULT;
                path_put(&path);
        }
        return error;
}

SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct statfs64 __user *, buf)
{
        struct path path;
        long error;

        if (sz != sizeof(*buf))
                return -EINVAL;
        error = user_path(pathname, &path);
        if (!error) {
                struct statfs64 tmp;
                error = vfs_statfs64(path.dentry, &tmp);
                if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
                        error = -EFAULT;
                path_put(&path);
        }
        return error;
}

SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
{
        struct file *file;
        struct statfs tmp;
        int error;

        error = -EBADF;
        file = fget(fd);
        if (!file)
                goto out;
        error = vfs_statfs_native(file->f_path.dentry, &tmp);
        if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
                error = -EFAULT;
        fput(file);
out:
        return error;
}

SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf)
{
        struct file *file;
        struct statfs64 tmp;
        int error;

        if (sz != sizeof(*buf))
                return -EINVAL;

        error = -EBADF;
        file = fget(fd);
        if (!file)
                goto out;
        error = vfs_statfs64(file->f_path.dentry, &tmp);
        if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
                error = -EFAULT;
        fput(file);
out:
        return error;
}

SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
{
        struct super_block *s;
        struct ustat tmp;
        struct kstatfs sbuf;
        int err;

        s = user_get_super(new_decode_dev(dev));
        if (!s)
                return -EINVAL;

        err = vfs_statfs(s->s_root, &sbuf);
        drop_super(s);
        if (err)
                return err;

        memset(&tmp,0,sizeof(struct ustat));
        tmp.f_tfree = sbuf.f_bfree;
        tmp.f_tinode = sbuf.f_ffree;

        return copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0;
}
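The hunk above is the entire new file implementing the statfs family of system calls. For orientation (not from this commit), the userspace side of the same interface looks like the following; the glibc statfs() wrapper ends up in the statfs/statfs64 system calls defined above:

#include <stdio.h>
#include <sys/vfs.h>            /* statfs(), struct statfs */

int main(void)
{
        struct statfs st;

        /* Path-based variant; fstatfs() is the fd-based equivalent. */
        if (statfs("/", &st) != 0) {
                perror("statfs");
                return 1;
        }
        printf("block size %ld, %llu of %llu blocks free\n",
               (long)st.f_bsize,
               (unsigned long long)st.f_bfree,
               (unsigned long long)st.f_blocks);
        return 0;
}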
fs/super.c (333 changed lines)
@@ -22,23 +22,15 @@

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vfs.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/backing-dev.h>
#include <asm/uaccess.h>
#include "internal.h"

@@ -93,9 +85,10 @@ static struct super_block *alloc_super(struct file_system_type *type)
         * subclass.
         */
        down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = S_BIAS;
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        mutex_init(&s->s_dquot.dqio_mutex);
        mutex_init(&s->s_dquot.dqonoff_mutex);
        init_rwsem(&s->s_dquot.dqptr_sem);

@@ -127,39 +120,14 @@
/* Superblock refcounting */

/*
 * Drop a superblock's refcount. Returns non-zero if the superblock was
 * destroyed. The caller must hold sb_lock.
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static int __put_super(struct super_block *sb)
void __put_super(struct super_block *sb)
{
        int ret = 0;

        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
                ret = 1;
        }
        return ret;
}

/*
 * Drop a superblock's refcount.
 * Returns non-zero if the superblock is about to be destroyed and
 * at least is already removed from super_blocks list, so if we are
 * making a loop through super blocks then we need to restart.
 * The caller must hold sb_lock.
 */
int __put_super_and_need_restart(struct super_block *sb)
{
        /* check for race with generic_shutdown_super() */
        if (list_empty(&sb->s_list)) {
                /* super block is removed, need to restart... */
                __put_super(sb);
                return 1;
        }
        /* can't be the last, since s_list is still in use */
        sb->s_count--;
        BUG_ON(sb->s_count == 0);
        return 0;
}

/**
@@ -177,48 +145,21 @@ void put_super(struct super_block *sb)
}


/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, acquiring a temprory one if
 * there is no active references left. In that case we lock superblock,
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 */
void deactivate_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
                s->s_count -= S_BIAS-1;
                spin_unlock(&sb_lock);
                vfs_dq_off(s, 0);
                down_write(&s->s_umount);
                fs->kill_sb(s);
                put_filesystem(fs);
                put_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Equivalent of up_write(&s->s_umount); deactivate_super(s);, except that
 * it does not unlock it until it's all over. As the result, it's safe to
 * use to dispose of new superblock on ->get_sb() failure exits - nobody
 * will see the sucker until it's all over. Equivalent using up_write +
 * deactivate_super is safe for that purpose only if superblock is either
 * safe to use or has NULL ->s_root when we unlock.
 * Drops an active reference to superblock, converting it into a temprory
 * one if there is no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
                s->s_count -= S_BIAS-1;
                spin_unlock(&sb_lock);
        if (atomic_dec_and_test(&s->s_active)) {
                vfs_dq_off(s, 0);
                fs->kill_sb(s);
                put_filesystem(fs);

@@ -230,6 +171,24 @@ void deactivate_locked_super(struct super_block *s)

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active

@@ -243,22 +202,17 @@ EXPORT_SYMBOL(deactivate_locked_super);
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        if (atomic_inc_not_zero(&s->s_active)) {
                spin_unlock(&sb_lock);
                return 1;
        }
        /* it's going away */
        s->s_count++;
        spin_unlock(&sb_lock);
        /* wait for it to die */
        down_write(&s->s_umount);
        if (s->s_root) {
                spin_lock(&sb_lock);
                if (s->s_count > S_BIAS) {
                        atomic_inc(&s->s_active);
                        s->s_count--;
                        spin_unlock(&sb_lock);
                        return 1;
                }
                spin_unlock(&sb_lock);
        }
        up_write(&s->s_umount);
        put_super(s);
        yield();
        return 0;
}
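The rewritten deactivate_super() above hinges on atomic_add_unless(&s->s_active, -1, 1): drop a reference only if it is not the last one, otherwise take s_umount and go through deactivate_locked_super(). A small standalone sketch of that "decrement unless it would be the last reference" primitive, written with C11 atomics instead of the kernel's atomic_t (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v by one unless it currently equals `unless`; returns nonzero
 * if the decrement happened. Mirrors atomic_add_unless(v, -1, 1). */
static int dec_unless(atomic_int *v, int unless)
{
        int old = atomic_load(v);

        while (old != unless) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return 1;
                /* on failure, old has been reloaded; retry */
        }
        return 0;
}

int main(void)
{
        atomic_int active = 2;

        if (dec_unless(&active, 1))
                printf("dropped a non-final reference, active=%d\n",
                       atomic_load(&active));
        if (!dec_unless(&active, 1))
                printf("final reference: take the lock and tear down\n");
        return 0;
}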
@@ -321,8 +275,7 @@ void generic_shutdown_super(struct super_block *sb)
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        list_del_init(&sb->s_list);
        list_del(&sb->s_instances);
        list_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}

@@ -357,6 +310,7 @@ struct super_block *sget(struct file_system_type *type,
                                up_write(&s->s_umount);
                                destroy_super(s);
                        }
                        down_write(&old->s_umount);
                        return old;
                }
        }

@@ -408,11 +362,12 @@ EXPORT_SYMBOL(drop_super);
 */
void sync_supers(void)
{
        struct super_block *sb;
        struct super_block *sb, *n;

        spin_lock(&sb_lock);
restart:
        list_for_each_entry(sb, &super_blocks, s_list) {
        list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_op->write_super && sb->s_dirt) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
@@ -423,13 +378,42 @@ void sync_supers(void)
                        up_read(&sb->s_umount);

                        spin_lock(&sb_lock);
                        if (__put_super_and_need_restart(sb))
                                goto restart;
                        __put_super(sb);
                }
        }
        spin_unlock(&sb_lock);
}

/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *n;

        spin_lock(&sb_lock);
        list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root)
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                __put_super(sb);
        }
        spin_unlock(&sb_lock);
}

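The new iterate_supers() above is the generic form of a pattern that recurs throughout this diff: pin a list entry with a reference count while the list lock is dropped, do the per-entry work under the entry's own lock, then re-take the list lock and drop the reference. The following userspace sketch (invented types and names, pthreads instead of the kernel primitives) mimics that pin/work/unpin loop; it is an analogue for illustration, not kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for super_block and the super_blocks list. */
struct toy_sb {
        int s_count;                    /* refcount, protected by list_lock */
        pthread_rwlock_t s_umount;      /* per-entry lock */
        int id;
        struct toy_sb *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_sb *sb_list;

/* Caller holds list_lock; frees the entry once the last reference is gone. */
static void toy_put_super(struct toy_sb *sb)
{
        if (--sb->s_count == 0)
                free(sb);
}

/* Analogue of iterate_supers(): the callback runs with the list unlocked
 * and only the entry's own lock held for reading. */
static void toy_iterate(void (*f)(struct toy_sb *, void *), void *arg)
{
        struct toy_sb *sb, *n;

        pthread_mutex_lock(&list_lock);
        for (sb = sb_list; sb; sb = n) {
                n = sb->next;           /* like list_for_each_entry_safe() */
                sb->s_count++;          /* pin the entry */
                pthread_mutex_unlock(&list_lock);

                pthread_rwlock_rdlock(&sb->s_umount);
                f(sb, arg);
                pthread_rwlock_unlock(&sb->s_umount);

                pthread_mutex_lock(&list_lock);
                toy_put_super(sb);      /* unpin */
        }
        pthread_mutex_unlock(&list_lock);
}

static void print_sb(struct toy_sb *sb, void *arg)
{
        printf("%s sb %d\n", (const char *)arg, sb->id);
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct toy_sb *sb = calloc(1, sizeof(*sb));

                sb->s_count = 1;        /* the list itself holds a reference */
                sb->id = i;
                pthread_rwlock_init(&sb->s_umount, NULL);
                sb->next = sb_list;
                sb_list = sb;
        }
        toy_iterate(print_sb, "visiting");
        return 0;
}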
/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for

@@ -438,7 +422,7 @@ void sync_supers(void)
 * mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block * get_super(struct block_device *bdev)
struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

@@ -448,17 +432,20 @@ struct super_block * get_super(struct block_device *bdev)
        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root)
                                return sb;
                        up_read(&sb->s_umount);
                        /* restart only when sb is no longer on the list */
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        if (__put_super_and_need_restart(sb))
                                goto rescan;
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);

@@ -473,7 +460,7 @@ EXPORT_SYMBOL(get_super);
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference and s_umount held exclusively or %NULL if none was found.
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{

@@ -482,81 +469,49 @@ struct super_block *get_active_super(struct block_device *bdev)
        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdev != bdev)
                if (list_empty(&sb->s_instances))
                        continue;

                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root) {
                        spin_lock(&sb_lock);
                        if (sb->s_count > S_BIAS) {
                                atomic_inc(&sb->s_active);
                                sb->s_count--;
                                spin_unlock(&sb_lock);
                if (sb->s_bdev == bdev) {
                        if (grab_super(sb)) /* drops sb_lock */
                                return sb;
                        }
                        spin_unlock(&sb_lock);
                        else
                                goto restart;
                }
                up_write(&sb->s_umount);
                put_super(sb);
                yield();
                spin_lock(&sb_lock);
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block * user_get_super(dev_t dev)
struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root)
                                return sb;
                        up_read(&sb->s_umount);
                        /* restart only when sb is no longer on the list */
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        if (__put_super_and_need_restart(sb))
                                goto rescan;
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
{
        struct super_block *s;
        struct ustat tmp;
        struct kstatfs sbuf;
        int err = -EINVAL;

        s = user_get_super(new_decode_dev(dev));
        if (s == NULL)
                goto out;
        err = vfs_statfs(s->s_root, &sbuf);
        drop_super(s);
        if (err)
                goto out;

        memset(&tmp,0,sizeof(struct ustat));
        tmp.f_tfree = sbuf.f_bfree;
        tmp.f_tinode = sbuf.f_ffree;

        err = copy_to_user(ubuf,&tmp,sizeof(struct ustat)) ? -EFAULT : 0;
out:
        return err;
}

/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
@@ -622,24 +577,24 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)

static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb;
        struct super_block *sb, *n;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
        list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * ->remount_fs needs lock_kernel().
                         *
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                put_super(sb);
                spin_lock(&sb_lock);
                __put_super(sb);
        }
        spin_unlock(&sb_lock);
        kfree(work);
@@ -990,6 +945,96 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void

EXPORT_SYMBOL_GPL(vfs_kern_mount);

/**
 * freeze_super -- lock the filesystem and force it into a consistent state
 * @super: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_frozen) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (sb->s_flags & MS_RDONLY) {
                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();
                up_write(&sb->s_umount);
                return 0;
        }

        sb->s_frozen = SB_FREEZE_WRITE;
        smp_wmb();

        sync_filesystem(sb);

        sb->s_frozen = SB_FREEZE_TRANS;
        smp_wmb();

        sync_blockdev(sb->s_bdev);
        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_frozen = SB_UNFROZEN;
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS:Filesystem thaw failed\n");
                        sb->s_frozen = SB_FREEZE_TRANS;
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
        int err;
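freeze_super() and thaw_super() above are the superblock-level entry points for filesystem freezing. From userspace the same operation is normally driven through the FIFREEZE/FITHAW ioctls on a mounted filesystem; a small illustrative program follows (not from this commit, needs root and a real mount point):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FIFREEZE, FITHAW */

int main(int argc, char **argv)
{
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Freeze, then immediately thaw; the kernel side goes through the
         * freeze_super()/thaw_super() helpers added above. */
        if (ioctl(fd, FIFREEZE, 0) != 0)
                perror("FIFREEZE");
        else if (ioctl(fd, FITHAW, 0) != 0)
                perror("FITHAW");
        close(fd);
        return 0;
}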
fs/sync.c (86 changed lines)
@@ -77,50 +77,18 @@ int sync_filesystem(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(sync_filesystem);

static void sync_one_sb(struct super_block *sb, void *arg)
{
        if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
                __sync_filesystem(sb, *(int *)arg);
}
/*
 * Sync all the data for all the filesystems (called by sys_sync() and
 * emergency sync)
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied. s_need_sync
 * is used only here. We set it against all filesystems and then clear it as
 * we sync them. So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync
 * flags again, which will cause process A to resync everything. Fix that with
 * a local mutex.
 */
static void sync_filesystems(int wait)
{
        struct super_block *sb;
        static DEFINE_MUTEX(mutex);

        mutex_lock(&mutex); /* Could be down_interruptible */
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list)
                sb->s_need_sync = 1;

restart:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (!sb->s_need_sync)
                        continue;
                sb->s_need_sync = 0;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
                        __sync_filesystem(sb, wait);
                up_read(&sb->s_umount);

                /* restart only when sb is no longer on the list */
                spin_lock(&sb_lock);
                if (__put_super_and_need_restart(sb))
                        goto restart;
        }
        spin_unlock(&sb_lock);
        mutex_unlock(&mutex);
        iterate_supers(sync_one_sb, &wait);
}

/*
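After this hunk, sync_filesystems() no longer walks the superblock list by hand; it simply hands sync_one_sb() to iterate_supers(). Nothing changes for userspace: the usual trigger for this path is still sync(2), with syncfs(2) (added in a later kernel release, glibc 2.14+) as the per-filesystem variant. An illustrative userspace counterpart, not from this commit:

#define _GNU_SOURCE             /* for syncfs() on glibc */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* Flush every mounted filesystem. */
        sync();

        /* Flush only the filesystem containing the current directory;
         * shown for contrast, syncfs() postdates this series. */
        int fd = open(".", O_RDONLY);
        if (fd >= 0) {
                if (syncfs(fd) != 0)
                        perror("syncfs");
                close(fd);
        }
        return 0;
}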
@@ -190,7 +158,6 @@ EXPORT_SYMBOL(file_fsync);
/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file: file to sync
 * @dentry: dentry of @file
 * @start: offset in bytes of the beginning of data range to sync
 * @end: offset in bytes of the end of data range (inclusive)
 * @datasync: perform only datasync

@@ -198,32 +165,13 @@ EXPORT_SYMBOL(file_fsync);
 * Write back data in range @start..@end and metadata for @file to disk. If
 * @datasync is set only metadata needed to access modified file data is
 * written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set. This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync_range(struct file *file, struct dentry *dentry, loff_t start,
                    loff_t end, int datasync)
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
        const struct file_operations *fop;
        struct address_space *mapping;
        struct address_space *mapping = file->f_mapping;
        int err, ret;

        /*
         * Get mapping and operations from the file in case we have
         * as file, or get the default values for them in case we
         * don't have a struct file available. Damn nfsd..
         */
        if (file) {
                mapping = file->f_mapping;
                fop = file->f_op;
        } else {
                mapping = dentry->d_inode->i_mapping;
                fop = dentry->d_inode->i_fop;
        }

        if (!fop || !fop->fsync) {
        if (!file->f_op || !file->f_op->fsync) {
                ret = -EINVAL;
                goto out;
        }

@@ -235,7 +183,7 @@ int vfs_fsync_range(struct file *file, struct dentry *dentry, loff_t start,
         * livelocks in fsync_buffers_list().
         */
        mutex_lock(&mapping->host->i_mutex);
        err = fop->fsync(file, dentry, datasync);
        err = file->f_op->fsync(file, file->f_path.dentry, datasync);
        if (!ret)
                ret = err;
        mutex_unlock(&mapping->host->i_mutex);

@@ -248,19 +196,14 @@ EXPORT_SYMBOL(vfs_fsync_range);
/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file: file to sync
 * @dentry: dentry of @file
 * @datasync: only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk. If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set. This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
int vfs_fsync(struct file *file, int datasync)
{
        return vfs_fsync_range(file, dentry, 0, LLONG_MAX, datasync);
        return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);

@@ -271,7 +214,7 @@ static int do_fsync(unsigned int fd, int datasync)

        file = fget(fd);
        if (file) {
                ret = vfs_fsync(file, file->f_path.dentry, datasync);
                ret = vfs_fsync(file, datasync);
                fput(file);
        }
        return ret;

@@ -299,8 +242,7 @@ int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
        if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
                return 0;
        return vfs_fsync_range(file, file->f_path.dentry, pos,
                               pos + count - 1,
        return vfs_fsync_range(file, pos, pos + count - 1,
                               (file->f_flags & __O_SYNC) ? 0 : 1);
}
EXPORT_SYMBOL(generic_write_sync);
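The fsync hunks above only change in-kernel plumbing: vfs_fsync() and vfs_fsync_range() lose the dentry argument because a struct file is now always available. The userspace contract of fsync(2) and fdatasync(2), which map to the datasync flag seen above, is untouched. For reference (not from this commit):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("example.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "hello\n", 6) != 6)
                perror("write");

        /* fsync(2): flush data and metadata (datasync == 0 in the kernel). */
        if (fsync(fd) != 0)
                perror("fsync");

        /* fdatasync(2): flush data plus only the metadata needed to read it
         * back (datasync == 1 in the kernel). */
        if (fdatasync(fd) != 0)
                perror("fdatasync");

        close(fd);
        return 0;
}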
@@ -159,15 +159,7 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
        *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
        fs16_add(sbi, sbi->s_sb_total_free_inodes, -1);
        dirty_sb(sb);

        if (dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();

        inode->i_uid = current_fsuid();
        inode_init_owner(inode, dir, mode);
        inode->i_ino = fs16_to_cpu(sbi, ino);
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
        inode->i_blocks = 0;

@@ -176,7 +168,6 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
        insert_inode_hash(inode);
        mark_inode_dirty(inode);

        inode->i_mode = mode;   /* for sysv_write_inode() */
        sysv_write_inode(inode, 0);     /* ensure inode not allocated again */
        mark_inode_dirty(inode);        /* cleared by sysv_write_inode() */
        /* That's it. */
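The sysv hunk above is one instance of the series-wide replacement of open-coded uid/gid/mode initialization with the new inode_init_owner() helper. A standalone sketch of the rule that helper centralizes, reconstructed from the lines being deleted above (toy types only; userspace getuid()/getgid() stand in for the kernel's current_fsuid()/current_fsgid()):

#include <stdio.h>
#include <sys/stat.h>           /* S_ISGID, S_ISDIR, S_IFDIR, S_IFREG */
#include <unistd.h>             /* getuid(), getgid() */

/* Toy inode carrying only the owner fields; not the kernel struct. */
struct toy_inode {
        uid_t i_uid;
        gid_t i_gid;
        mode_t i_mode;
};

/* New inodes take the caller's fsuid/fsgid, except that files created in a
 * setgid directory inherit the directory's group, and new subdirectories
 * keep the setgid bit. */
static void toy_init_owner(struct toy_inode *inode,
                           const struct toy_inode *dir, mode_t mode)
{
        inode->i_uid = getuid();
        if (dir && (dir->i_mode & S_ISGID)) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
                inode->i_gid = getgid();
        }
        inode->i_mode = mode;
}

int main(void)
{
        struct toy_inode dir = { .i_uid = 0, .i_gid = 100,
                                 .i_mode = S_IFDIR | S_ISGID | 0775 };
        struct toy_inode file;

        toy_init_owner(&file, &dir, S_IFREG | 0644);
        printf("gid=%d mode=%o\n", (int)file.i_gid, (unsigned)file.i_mode);
        return 0;
}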