mirror of https://gitee.com/openkylin/linux.git
fs: dcache scale dentry refcount
Make d_count non-atomic and protect it with d_lock. This allows us to ensure a 0 refcount dentry remains 0 without dcache_lock. It is also fairly natural when we start protecting many other dentry members with d_lock.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
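The rule the patch establishes is that d_count becomes a plain integer whose updates are serialized by that dentry's d_lock, instead of an atomic counter. As a rough illustration of that pattern outside the kernel, here is a minimal userspace sketch; struct obj, obj_get(), obj_put() and the pthread mutex are hypothetical stand-ins for struct dentry, dget()/dput() and d_lock, not the kernel code itself.

/*
 * Userspace sketch of a lock-protected reference count: the counter is a
 * plain integer guarded by the object's own lock, so a 0 -> 1 transition
 * can never race with teardown. pthread_mutex_t stands in for spinlock_t.
 */
#include <assert.h>
#include <pthread.h>

struct obj {
	unsigned int count;		/* like d_count: protected by lock */
	pthread_mutex_t lock;		/* like d_lock */
};

static void obj_get(struct obj *o)	/* cf. dget()/dget_dlock() */
{
	pthread_mutex_lock(&o->lock);
	assert(o->count);		/* cf. BUG_ON(!dentry->d_count) */
	o->count++;
	pthread_mutex_unlock(&o->lock);
}

static int obj_put(struct obj *o)	/* cf. dput(): returns 1 on last ref */
{
	int last;

	pthread_mutex_lock(&o->lock);
	o->count--;
	last = (o->count == 0);
	pthread_mutex_unlock(&o->lock);
	return last;
}

int main(void)
{
	struct obj o = { .count = 1, .lock = PTHREAD_MUTEX_INITIALIZER };

	obj_get(&o);			/* count: 1 -> 2 */
	obj_put(&o);			/* count: 2 -> 1 */
	if (obj_put(&o)) {
		/* last reference dropped; teardown would happen here */
	}
	return 0;
}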
parent 2304450783
commit b7ab39f631
@@ -162,7 +162,7 @@ static void spufs_prune_dir(struct dentry *dir)
 		spin_lock(&dcache_lock);
 		spin_lock(&dentry->d_lock);
 		if (!(d_unhashed(dentry)) && dentry->d_inode) {
-			dget_locked(dentry);
+			dget_locked_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
 			simple_unlink(dir->d_inode, dentry);
@@ -280,7 +280,7 @@ static int remove_file(struct dentry *parent, char *name)
 	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_locked_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
 		spin_unlock(&dcache_lock);

@@ -456,7 +456,7 @@ static int remove_file(struct dentry *parent, char *name)
 	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_locked_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
 		spin_unlock(&dcache_lock);
@@ -198,7 +198,7 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
 			else
 				ino_count++;

-			if (atomic_read(&p->d_count) > ino_count) {
+			if (p->d_count > ino_count) {
 				top_ino->last_used = jiffies;
 				dput(p);
 				return 1;

@@ -347,7 +347,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,

 		/* Path walk currently on this dentry? */
 		ino_count = atomic_read(&ino->count) + 2;
-		if (atomic_read(&dentry->d_count) > ino_count)
+		if (dentry->d_count > ino_count)
 			goto next;

 		/* Can we umount this guy */

@@ -369,7 +369,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 		if (!exp_leaves) {
 			/* Path walk currently on this dentry? */
 			ino_count = atomic_read(&ino->count) + 1;
-			if (atomic_read(&dentry->d_count) > ino_count)
+			if (dentry->d_count > ino_count)
 				goto next;

 			if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {

@@ -383,7 +383,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 		} else {
 			/* Path walk currently on this dentry? */
 			ino_count = atomic_read(&ino->count) + 1;
-			if (atomic_read(&dentry->d_count) > ino_count)
+			if (dentry->d_count > ino_count)
 				goto next;

 			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);

@@ -436,7 +436,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
 		spin_lock(&active->d_lock);

 		/* Already gone? */
-		if (atomic_read(&active->d_count) == 0)
+		if (active->d_count == 0)
 			goto next;

 		qstr = &active->d_name;

@@ -452,7 +452,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
 			goto next;

 		if (d_unhashed(active)) {
-			dget(active);
+			dget_dlock(active);
 			spin_unlock(&active->d_lock);
 			spin_unlock(&sbi->lookup_lock);
 			spin_unlock(&dcache_lock);

@@ -507,7 +507,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
 			goto next;

 		if (d_unhashed(expiring)) {
-			dget(expiring);
+			dget_dlock(expiring);
 			spin_unlock(&expiring->d_lock);
 			spin_unlock(&sbi->lookup_lock);
 			spin_unlock(&dcache_lock);
@@ -150,7 +150,9 @@ static int __dcache_readdir(struct file *filp,
 		di = ceph_dentry(dentry);
 	}

-	atomic_inc(&dentry->d_count);
+	spin_lock(&dentry->d_lock);
+	dentry->d_count++;
+	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);

 	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,

@@ -879,8 +879,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
 	} else if (realdn) {
 		dout("dn %p (%d) spliced with %p (%d) "
 		     "inode %p ino %llx.%llx\n",
-		     dn, atomic_read(&dn->d_count),
-		     realdn, atomic_read(&realdn->d_count),
+		     dn, dn->d_count,
+		     realdn, realdn->d_count,
 		     realdn->d_inode, ceph_vinop(realdn->d_inode));
 		dput(dn);
 		dn = realdn;

@@ -1486,7 +1486,7 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
 	*base = ceph_ino(temp->d_inode);
 	*plen = len;
 	dout("build_path on %p %d built %llx '%.*s'\n",
-	     dentry, atomic_read(&dentry->d_count), *base, len, path);
+	     dentry, dentry->d_count, *base, len, path);
 	return path;
 }

@@ -559,7 +559,7 @@ static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
 	if (cii->c_flags & C_FLUSH)
 		coda_flag_inode_children(inode, C_FLUSH);

-	if (atomic_read(&de->d_count) > 1)
+	if (de->d_count > 1)
 		/* pretend it's valid, but don't change the flags */
 		goto out;

@@ -394,8 +394,7 @@ static void remove_dir(struct dentry * d)
 	if (d->d_inode)
 		simple_rmdir(parent->d_inode,d);

-	pr_debug(" o %s removing done (%d)\n",d->d_name.name,
-		 atomic_read(&d->d_count));
+	pr_debug(" o %s removing done (%d)\n",d->d_name.name, d->d_count);

 	dput(parent);
 }

@@ -253,7 +253,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
 	if (!(d_unhashed(dentry) && dentry->d_inode)) {
-		dget_locked(dentry);
+		dget_locked_dlock(dentry);
 		__d_drop(dentry);
 		spin_unlock(&dentry->d_lock);
 		spin_unlock(&dcache_lock);
fs/dcache.c (106 lines changed)
@@ -45,6 +45,7 @@
  *   - d_flags
  *   - d_name
  *   - d_lru
+ *   - d_count
  *
  * Ordering:
  * dcache_lock

@@ -125,6 +126,7 @@ static void __d_free(struct rcu_head *head)
  */
 static void d_free(struct dentry *dentry)
 {
+	BUG_ON(dentry->d_count);
 	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);

@@ -222,8 +224,11 @@ static struct dentry *d_kill(struct dentry *dentry)
 	struct dentry *parent;

 	list_del(&dentry->d_u.d_child);
-	/*drops the locks, at that point nobody can reach this dentry */
 	dentry_iput(dentry);
+	/*
+	 * dentry_iput drops the locks, at which point nobody (except
+	 * transient RCU lookups) can reach this dentry.
+	 */
 	if (IS_ROOT(dentry))
 		parent = NULL;
 	else
@@ -303,13 +308,23 @@ void dput(struct dentry *dentry)
 		return;

 repeat:
-	if (atomic_read(&dentry->d_count) == 1)
+	if (dentry->d_count == 1)
 		might_sleep();
-	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
-		return;
-
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count)) {
+	if (dentry->d_count == 1) {
+		if (!spin_trylock(&dcache_lock)) {
+			/*
+			 * Something of a livelock possibility we could avoid
+			 * by taking dcache_lock and trying again, but we
+			 * want to reduce dcache_lock anyway so this will
+			 * get improved.
+			 */
+			spin_unlock(&dentry->d_lock);
+			goto repeat;
+		}
+	}
+	dentry->d_count--;
+	if (dentry->d_count) {
 		spin_unlock(&dentry->d_lock);
 		spin_unlock(&dcache_lock);
 		return;
@@ -389,7 +404,7 @@ int d_invalidate(struct dentry * dentry)
 	 * working directory or similar).
 	 */
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) > 1) {
+	if (dentry->d_count > 1) {
 		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
 			spin_unlock(&dentry->d_lock);
 			spin_unlock(&dcache_lock);
@@ -404,29 +419,61 @@ int d_invalidate(struct dentry * dentry)
 }
 EXPORT_SYMBOL(d_invalidate);

-/* This should be called _only_ with dcache_lock held */
+/* This must be called with dcache_lock and d_lock held */
 static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
 {
-	atomic_inc(&dentry->d_count);
+	dentry->d_count++;
 	dentry_lru_del(dentry);
 	return dentry;
 }

 /* This should be called _only_ with dcache_lock held */
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
-	atomic_inc(&dentry->d_count);
 	spin_lock(&dentry->d_lock);
-	dentry_lru_del(dentry);
+	__dget_locked_dlock(dentry);
 	spin_unlock(&dentry->d_lock);
 	return dentry;
 }

+struct dentry * dget_locked_dlock(struct dentry *dentry)
+{
+	return __dget_locked_dlock(dentry);
+}
+
 struct dentry * dget_locked(struct dentry *dentry)
 {
 	return __dget_locked(dentry);
 }
 EXPORT_SYMBOL(dget_locked);

+struct dentry *dget_parent(struct dentry *dentry)
+{
+	struct dentry *ret;
+
+repeat:
+	spin_lock(&dentry->d_lock);
+	ret = dentry->d_parent;
+	if (!ret)
+		goto out;
+	if (dentry == ret) {
+		ret->d_count++;
+		goto out;
+	}
+	if (!spin_trylock(&ret->d_lock)) {
+		spin_unlock(&dentry->d_lock);
+		cpu_relax();
+		goto repeat;
+	}
+	BUG_ON(!ret->d_count);
+	ret->d_count++;
+	spin_unlock(&ret->d_lock);
+out:
+	spin_unlock(&dentry->d_lock);
+	return ret;
+}
+EXPORT_SYMBOL(dget_parent);
+
 /**
  * d_find_alias - grab a hashed alias of inode
  * @inode: inode in question
@@ -495,7 +542,7 @@ void d_prune_aliases(struct inode *inode)
 	spin_lock(&dcache_lock);
 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
-		if (!atomic_read(&dentry->d_count)) {
+		if (!dentry->d_count) {
 			__dget_locked_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);

@@ -530,7 +577,10 @@ static void prune_one_dentry(struct dentry * dentry)
 	 */
 	while (dentry) {
 		spin_lock(&dcache_lock);
-		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) {
+		spin_lock(&dentry->d_lock);
+		dentry->d_count--;
+		if (dentry->d_count) {
+			spin_unlock(&dentry->d_lock);
 			spin_unlock(&dcache_lock);
 			return;
 		}
@@ -562,7 +612,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * the LRU because of laziness during lookup. Do not free
 		 * it - just keep it off the LRU list.
 		 */
-		if (atomic_read(&dentry->d_count)) {
+		if (dentry->d_count) {
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}

@@ -783,7 +833,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 	do {
 		struct inode *inode;

-		if (atomic_read(&dentry->d_count) != 0) {
+		if (dentry->d_count != 0) {
 			printk(KERN_ERR
 			       "BUG: Dentry %p{i=%lx,n=%s}"
 			       " still in use (%d)"

@@ -792,7 +842,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			       dentry->d_inode ?
 			       dentry->d_inode->i_ino : 0UL,
 			       dentry->d_name.name,
-			       atomic_read(&dentry->d_count),
+			       dentry->d_count,
 			       dentry->d_sb->s_type->name,
 			       dentry->d_sb->s_id);
 			BUG();

@@ -802,7 +852,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			parent = NULL;
 		else {
 			parent = dentry->d_parent;
-			atomic_dec(&parent->d_count);
+			spin_lock(&parent->d_lock);
+			parent->d_count--;
+			spin_unlock(&parent->d_lock);
 		}

 		list_del(&dentry->d_u.d_child);
@@ -853,7 +905,9 @@ void shrink_dcache_for_umount(struct super_block *sb)

 	dentry = sb->s_root;
 	sb->s_root = NULL;
-	atomic_dec(&dentry->d_count);
+	spin_lock(&dentry->d_lock);
+	dentry->d_count--;
+	spin_unlock(&dentry->d_lock);
 	shrink_dcache_for_umount_subtree(dentry);

 	while (!hlist_empty(&sb->s_anon)) {

@@ -950,7 +1004,7 @@ static int select_parent(struct dentry * parent)
 		 * move only zero ref count dentries to the end
 		 * of the unused list for prune_dcache
 		 */
-		if (!atomic_read(&dentry->d_count)) {
+		if (!dentry->d_count) {
 			dentry_lru_move_tail(dentry);
 			found++;
 		} else {

@@ -1068,7 +1122,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	memcpy(dname, name->name, name->len);
 	dname[name->len] = 0;

-	atomic_set(&dentry->d_count, 1);
+	dentry->d_count = 1;
 	dentry->d_flags = DCACHE_UNHASHED;
 	spin_lock_init(&dentry->d_lock);
 	dentry->d_inode = NULL;
@@ -1556,7 +1610,7 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
 			goto next;
 		}

-		atomic_inc(&dentry->d_count);
+		dentry->d_count++;
 		found = dentry;
 		spin_unlock(&dentry->d_lock);
 		break;

@@ -1653,7 +1707,7 @@ void d_delete(struct dentry * dentry)
 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
 	isdir = S_ISDIR(dentry->d_inode->i_mode);
-	if (atomic_read(&dentry->d_count) == 1) {
+	if (dentry->d_count == 1) {
 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
 		dentry_iput(dentry);
 		fsnotify_nameremove(dentry, isdir);
@@ -2494,11 +2548,15 @@ void d_genocide(struct dentry *root)
 				this_parent = dentry;
 				goto repeat;
 			}
-			atomic_dec(&dentry->d_count);
+			spin_lock(&dentry->d_lock);
+			dentry->d_count--;
+			spin_unlock(&dentry->d_lock);
 		}
 		if (this_parent != root) {
 			next = this_parent->d_u.d_child.next;
-			atomic_dec(&this_parent->d_count);
+			spin_lock(&this_parent->d_lock);
+			this_parent->d_count--;
+			spin_unlock(&this_parent->d_lock);
 			this_parent = this_parent->d_parent;
 			goto resume;
 		}
@@ -260,7 +260,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
 						ecryptfs_dentry->d_parent));
 	lower_inode = lower_dentry->d_inode;
 	fsstack_copy_attr_atime(ecryptfs_dir_inode, lower_dir_dentry->d_inode);
-	BUG_ON(!atomic_read(&lower_dentry->d_count));
+	BUG_ON(!lower_dentry->d_count);
 	ecryptfs_set_dentry_private(ecryptfs_dentry,
 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
 						     GFP_KERNEL));

@@ -1389,7 +1389,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
 			goto out;
 		if ((arg == F_WRLCK)
-		    && ((atomic_read(&dentry->d_count) > 1)
+		    && ((dentry->d_count > 1)
 			|| (atomic_read(&inode->i_count) > 1)))
 			goto out;
 	}
@@ -2133,7 +2133,7 @@ void dentry_unhash(struct dentry *dentry)
 	shrink_dcache_parent(dentry);
 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) == 2)
+	if (dentry->d_count == 2)
 		__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);
@@ -1720,7 +1720,7 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)

 	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) > 1) {
+	if (dentry->d_count > 1) {
 		spin_unlock(&dentry->d_lock);
 		spin_unlock(&dcache_lock);
 		/* Start asynchronous writeout of the inode */

@@ -1868,7 +1868,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
 		 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
 		 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
-		 atomic_read(&new_dentry->d_count));
+		 new_dentry->d_count);

 	/*
 	 * For non-directories, check whether the target is busy and if so,

@@ -1886,7 +1886,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		rehash = new_dentry;
 	}

-	if (atomic_read(&new_dentry->d_count) > 2) {
+	if (new_dentry->d_count > 2) {
 		int err;

 		/* copy the target dentry's name */

@@ -496,7 +496,7 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)

 	dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name,
-		atomic_read(&dentry->d_count));
+		dentry->d_count);
 	nfs_inc_stats(dir, NFSIOS_SILLYRENAME);

 	/*
@@ -1756,8 +1756,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
 		goto out_dput_new;

 	if (svc_msnfs(ffhp) &&
-		((atomic_read(&odentry->d_count) > 1)
-		 || (atomic_read(&ndentry->d_count) > 1))) {
+		((odentry->d_count > 1) || (ndentry->d_count > 1))) {
 			host_err = -EPERM;
 			goto out_dput_new;
 	}

@@ -1843,7 +1842,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 	if (type != S_IFDIR) { /* It's UNLINK */
 #ifdef MSNFS
 		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-			(atomic_read(&rdentry->d_count) > 1)) {
+			(rdentry->d_count > 1)) {
 			host_err = -EPERM;
 		} else
 #endif
@@ -838,7 +838,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,

 static int nilfs_tree_was_touched(struct dentry *root_dentry)
 {
-	return atomic_read(&root_dentry->d_count) > 1;
+	return root_dentry->d_count > 1;
 }

 /**
@@ -87,7 +87,7 @@ full_name_hash(const unsigned char *name, unsigned int len)
 #endif

 struct dentry {
-	atomic_t d_count;
+	unsigned int d_count;		/* protected by d_lock */
 	unsigned int d_flags;		/* protected by d_lock */
 	spinlock_t d_lock;		/* per dentry lock */
 	int d_mounted;

@@ -297,17 +297,28 @@ extern char *dentry_path(struct dentry *, char *, int);
 * needs and they take necessary precautions) you should hold dcache_lock
 * and call dget_locked() instead of dget().
 */
-
+static inline struct dentry *dget_dlock(struct dentry *dentry)
+{
+	if (dentry) {
+		BUG_ON(!dentry->d_count);
+		dentry->d_count++;
+	}
+	return dentry;
+}
 static inline struct dentry *dget(struct dentry *dentry)
 {
 	if (dentry) {
-		BUG_ON(!atomic_read(&dentry->d_count));
-		atomic_inc(&dentry->d_count);
+		spin_lock(&dentry->d_lock);
+		dget_dlock(dentry);
+		spin_unlock(&dentry->d_lock);
 	}
 	return dentry;
 }

 extern struct dentry * dget_locked(struct dentry *);
+extern struct dentry * dget_locked_dlock(struct dentry *);
+
+extern struct dentry *dget_parent(struct dentry *dentry);

 /**
  * d_unhashed - is dentry hashed

@@ -338,16 +349,6 @@ static inline void dont_mount(struct dentry *dentry)
 	spin_unlock(&dentry->d_lock);
 }

-static inline struct dentry *dget_parent(struct dentry *dentry)
-{
-	struct dentry *ret;
-
-	spin_lock(&dentry->d_lock);
-	ret = dget(dentry->d_parent);
-	spin_unlock(&dentry->d_lock);
-	return ret;
-}
-
 extern void dput(struct dentry *);

 static inline int d_mountpoint(struct dentry *dentry)
@@ -3655,9 +3655,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	list_del(&cgrp->sibling);
 	cgroup_unlock_hierarchy(cgrp->root);

-	spin_lock(&cgrp->dentry->d_lock);
 	d = dget(cgrp->dentry);
-	spin_unlock(&d->d_lock);

 	cgroup_d_remove_dir(d);
 	dput(d);
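The dget_parent() added in fs/dcache.c above cannot rely on a fixed parent-then-child lock order, so it locks the child, trylocks the parent, and drops everything and retries if the parent's lock is contended. A minimal userspace sketch of that back-off pattern follows; struct node, grab_parent() and the pthread/sched calls are illustrative stand-ins, not the kernel implementation.

#include <pthread.h>
#include <sched.h>

struct node {
	unsigned int count;		/* like d_count, protected by lock */
	pthread_mutex_t lock;		/* like d_lock */
	struct node *parent;		/* like d_parent */
};

/* cf. dget_parent(): take a reference on n->parent without a fixed lock order */
static struct node *grab_parent(struct node *n)
{
	struct node *p;

repeat:
	pthread_mutex_lock(&n->lock);
	p = n->parent;
	if (!p)
		goto out;
	if (p == n) {				/* a root points to itself */
		p->count++;
		goto out;
	}
	if (pthread_mutex_trylock(&p->lock)) {	/* non-zero return: lock busy */
		pthread_mutex_unlock(&n->lock);	/* back off to avoid deadlock */
		sched_yield();			/* cf. cpu_relax() */
		goto repeat;
	}
	p->count++;
	pthread_mutex_unlock(&p->lock);
out:
	pthread_mutex_unlock(&n->lock);
	return p;
}

int main(void)
{
	struct node parent = { .count = 1, .lock = PTHREAD_MUTEX_INITIALIZER,
			       .parent = &parent };
	struct node child  = { .count = 1, .lock = PTHREAD_MUTEX_INITIALIZER,
			       .parent = &parent };

	struct node *p = grab_parent(&child);	/* parent.count becomes 2 */
	return p == &parent ? 0 : 1;
}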