#ifndef __LINUX_DCACHE_H
#define __LINUX_DCACHE_H
#include <asm/atomic.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
struct nameidata;
struct path;
struct vfsmount;
/*
 * linux/include/linux/dcache.h
 *
 * Dirent cache data structures
 *
 * (C) Copyright 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */
#define IS_ROOT(x) ((x) == (x)->d_parent)
/*
 * "quick string" -- eases parameter passing, but more importantly
 * saves "metadata" about the string (ie length and the hash).
 *
 * hash comes first so it snuggles against d_parent in the
 * dentry.
 */
struct qstr {
        unsigned int hash;
        unsigned int len;
        const unsigned char *name;
};
struct dentry_stat_t {
        int nr_dentry;
        int nr_unused;
        int age_limit;          /* age in seconds */
        int want_pages;         /* pages requested by system */
        int dummy[2];
};
extern struct dentry_stat_t dentry_stat;
/* Name hashing routines. Initial hash value */
/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
#define init_name_hash() 0
/* partial hash update function. Assume roughly 4 bits per character */
static inline unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
        return (prevhash + (c << 4) + (c >> 4)) * 11;
}
/*
 * Finally: cut down the number of bits to an int value (and try to avoid
 * losing bits)
 */
static inline unsigned long end_name_hash(unsigned long hash)
{
        return (unsigned int) hash;
}
/* Compute the hash for a name string. */
static inline unsigned int
full_name_hash(const unsigned char *name, unsigned int len)
{
        unsigned long hash = init_name_hash();
        while (len--)
                hash = partial_name_hash(*name++, hash);
        return end_name_hash(hash);
}
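/*
 * Illustrative usage (not part of this header): filling in a struct qstr
 * and hashing a component name before looking it up under its parent.
 * "parent" and "name" are hypothetical locals; filesystems that supply
 * their own d_hash() would go through d_hash_and_lookup() instead.
 *
 *      struct qstr q;
 *      struct dentry *child;
 *
 *      q.name = name;
 *      q.len  = strlen(name);
 *      q.hash = full_name_hash(q.name, q.len);
 *      child  = d_lookup(parent, &q);
 */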
/*
 * Try to keep struct dentry aligned on 64 byte cachelines (this will
 * give reasonable cacheline footprint with larger lines without the
 * large memory footprint increase).
 */
#ifdef CONFIG_64BIT
#define DNAME_INLINE_LEN_MIN 32 /* 192 bytes */
#else
#define DNAME_INLINE_LEN_MIN 40 /* 128 bytes */
#endif
struct dentry {
        atomic_t d_count;
        unsigned int d_flags;           /* protected by d_lock */
        spinlock_t d_lock;              /* per dentry lock */
        int d_mounted;
        struct inode *d_inode;          /* Where the name belongs to - NULL is
                                         * negative */
        /*
         * The next three fields are touched by __d_lookup. Place them here
         * so they all fit in a cache line.
         */
        struct hlist_node d_hash;       /* lookup hash list */
        struct dentry *d_parent;        /* parent directory */
        struct qstr d_name;
        struct list_head d_lru;         /* LRU list */
        /*
         * d_child and d_rcu can share memory
         */
        union {
                struct list_head d_child;       /* child of parent list */
                struct rcu_head d_rcu;
        } d_u;
        struct list_head d_subdirs;     /* our children */
        struct list_head d_alias;       /* inode alias list */
        unsigned long d_time;           /* used by d_revalidate */
        const struct dentry_operations *d_op;
        struct super_block *d_sb;       /* The root of the dentry tree */
        void *d_fsdata;                 /* fs-specific data */
        unsigned char d_iname[DNAME_INLINE_LEN_MIN];    /* small names */
};
/*
 * dentry->d_lock spinlock nesting subclasses:
 *
 * 0: normal
 * 1: nested
 */
enum dentry_d_lock_class
{
        DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
        DENTRY_D_LOCK_NESTED
};
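/*
 * Illustrative usage: when two dentry locks must be held at once (for
 * example a dentry and its parent), the second acquisition is taken with
 * the nested subclass so lockdep does not flag a false deadlock.  A sketch,
 * not a locking rule imposed by this header:
 *
 *      spin_lock(&parent->d_lock);
 *      spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 *      ...
 *      spin_unlock(&dentry->d_lock);
 *      spin_unlock(&parent->d_lock);
 */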
struct dentry_operations {
        int (*d_revalidate)(struct dentry *, struct nameidata *);
        int (*d_hash) (struct dentry *, struct qstr *);
        int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
        int (*d_delete)(struct dentry *);
        void (*d_release)(struct dentry *);
        void (*d_iput)(struct dentry *, struct inode *);
        char *(*d_dname)(struct dentry *, char *, int);
};
/* the dentry parameter passed to d_hash and d_compare is the parent
 * directory of the entries to be compared. It is used in case these
 * functions need any directory specific information for determining
 * equivalency classes. Using the dentry itself might not work, as it
 * might be a negative dentry which has no information associated with
 * it */
/*
locking rules:
                big lock        dcache_lock     d_lock          may block
d_revalidate:   no              no              no              yes
d_hash          no              no              no              yes
d_compare:      no              yes             yes             no
d_delete:       no              yes             no              no
d_release:      no              no              no              yes
d_iput:         no              no              no              yes
 */
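/*
 * Illustrative sketch of a filesystem supplying case-insensitive name
 * handling through these hooks ("myfs_*" names are hypothetical; the
 * sketch assumes <linux/ctype.h> and <linux/string.h>).  Note that the
 * dentry passed in is the parent directory, as described above.
 *
 *      static int myfs_d_hash(struct dentry *dir, struct qstr *q)
 *      {
 *              unsigned long hash = init_name_hash();
 *              unsigned int i;
 *
 *              for (i = 0; i < q->len; i++)
 *                      hash = partial_name_hash(tolower(q->name[i]), hash);
 *              q->hash = end_name_hash(hash);
 *              return 0;
 *      }
 *
 *      static int myfs_d_compare(struct dentry *dir, struct qstr *a,
 *                                struct qstr *b)
 *      {
 *              if (a->len != b->len)
 *                      return 1;
 *              return strncasecmp((const char *)a->name,
 *                                 (const char *)b->name, a->len);
 *      }
 *
 *      static const struct dentry_operations myfs_dentry_ops = {
 *              .d_hash    = myfs_d_hash,
 *              .d_compare = myfs_d_compare,
 *      };
 */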
/* d_flags entries */
#define DCACHE_AUTOFS_PENDING 0x0001 /* autofs: "under construction" */
#define DCACHE_NFSFS_RENAMED    0x0002  /* this dentry has been "silly
                                         * renamed" and has to be
                                         * deleted on the last dput()
                                         */
#define DCACHE_DISCONNECTED 0x0004
/* This dentry is possibly not currently connected to the dcache tree,
 * in which case its parent will either be itself, or will have this
 * flag as well.  nfsd will not use a dentry with this bit set, but will
 * first endeavour to clear the bit either by discovering that it is
 * connected, or by performing lookup operations.  Any filesystem which
 * supports nfsd_operations MUST have a lookup function which, if it finds
 * a directory inode with a DCACHE_DISCONNECTED dentry, will d_move
 * that dentry into place and return that dentry rather than the passed one,
 * typically using d_splice_alias.
 */
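/*
 * Illustrative sketch of a ->lookup() method that follows the rule above
 * by letting d_splice_alias() reconnect a DCACHE_DISCONNECTED alias
 * ("myfs_inode_by_name" is a hypothetical helper returning an inode,
 * NULL, or an ERR_PTR):
 *
 *      static struct dentry *myfs_lookup(struct inode *dir,
 *                                        struct dentry *dentry,
 *                                        struct nameidata *nd)
 *      {
 *              struct inode *inode = myfs_inode_by_name(dir, &dentry->d_name);
 *
 *              if (IS_ERR(inode))
 *                      return ERR_CAST(inode);
 *              return d_splice_alias(inode, dentry);
 *      }
 */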
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
#define DCACHE_UNHASHED 0x0010
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */
#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
static inline void __d_drop(struct dentry *dentry)
{
        if (!(dentry->d_flags & DCACHE_UNHASHED)) {
                dentry->d_flags |= DCACHE_UNHASHED;
                hlist_del_rcu(&dentry->d_hash);
        }
}

static inline void d_drop(struct dentry *dentry)
{
        spin_lock(&dcache_lock);
        spin_lock(&dentry->d_lock);
        __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&dcache_lock);
}
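/*
 * Illustrative usage: a filesystem that learns a cached name has gone
 * stale (say, after a failed revalidation against the server) can unhash
 * it so the next lookup misses and goes back to the filesystem.  The
 * "myfs_name_is_stale" helper is hypothetical:
 *
 *      if (myfs_name_is_stale(dentry))
 *              d_drop(dentry);
 */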
static inline int dname_external(struct dentry *dentry)
{
        return dentry->d_name.name != dentry->d_iname;
}
/*
 * These are the low-level FS interfaces to the dcache..
 */
extern void d_instantiate(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
extern void d_delete(struct dentry *);
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_obtain_alias(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
extern int d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
/* test whether we have any submounts in a subdir tree */
extern int have_submounts(struct dentry *);
/*
 * This adds the entry to the hash queues.
 */
extern void d_rehash(struct dentry *);
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
static inline void d_add(struct dentry *entry, struct inode *inode)
{
        d_instantiate(entry, inode);
        d_rehash(entry);
}
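/*
 * Illustrative usage in a ->lookup() method: once the filesystem has
 * resolved the name, the dentry allocated by the VFS is attached to its
 * inode and hashed; passing a NULL inode instantiates a negative dentry.
 * "myfs_iget" and "ino" are hypothetical:
 *
 *      inode = ino ? myfs_iget(dir->i_sb, ino) : NULL;
 *      d_add(dentry, inode);
 *      return NULL;
 */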
/**
 * d_add_unique - add dentry to hash queues without aliasing
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
{
        struct dentry *res;

        res = d_instantiate_unique(entry, inode);
        d_rehash(res != NULL ? res : entry);
        return res;
}
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry * d_lookup(struct dentry *, struct qstr *);
extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
/* validate "insecure" dentry pointer */
extern int d_validate(struct dentry *, struct dentry *);
/*
 * helper function for dentry_operations.d_dname() members
 */
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *__d_path(const struct path *path, struct path *root, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *dentry_path(struct dentry *, char *, int);
/* Allocation counts.. */
/**
 * dget, dget_locked - get a reference to a dentry
 * @dentry: dentry to get a reference to
 *
 * Given a dentry or %NULL pointer increment the reference count
 * if appropriate and return the dentry. A dentry will not be
 * destroyed when it has references. dget() should never be
 * called for dentries with zero reference counter. For these cases
 * (preferably none, functions in dcache.c are sufficient for normal
 * needs and they take necessary precautions) you should hold dcache_lock
 * and call dget_locked() instead of dget().
 */
static inline struct dentry *dget(struct dentry *dentry)
{
        if (dentry) {
                BUG_ON(!atomic_read(&dentry->d_count));
                atomic_inc(&dentry->d_count);
        }
        return dentry;
}
extern struct dentry * dget_locked(struct dentry *);
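/*
 * Illustrative sketch of the dget_locked() case described above: taking a
 * reference on an existing alias of an inode while dcache_lock is held (a
 * simplified version of what d_find_alias() does internally):
 *
 *      struct dentry *alias = NULL;
 *
 *      spin_lock(&dcache_lock);
 *      if (!list_empty(&inode->i_dentry)) {
 *              alias = list_entry(inode->i_dentry.next,
 *                                 struct dentry, d_alias);
 *              dget_locked(alias);
 *      }
 *      spin_unlock(&dcache_lock);
 */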
/**
 * d_unhashed - is dentry hashed
 * @dentry: entry to check
 *
 * Returns true if the dentry passed is not currently hashed.
 */
static inline int d_unhashed(struct dentry *dentry)
{
        return (dentry->d_flags & DCACHE_UNHASHED);
}
static inline int d_unlinked(struct dentry *dentry)
{
        return d_unhashed(dentry) && !IS_ROOT(dentry);
}
static inline struct dentry *dget_parent(struct dentry *dentry)
{
        struct dentry *ret;

        spin_lock(&dentry->d_lock);
        ret = dget(dentry->d_parent);
        spin_unlock(&dentry->d_lock);
        return ret;
}
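/*
 * Illustrative usage: take a stable reference on the parent before using
 * it outside d_lock (d_parent itself may change under rename), and drop
 * it with dput() when done:
 *
 *      struct dentry *parent = dget_parent(dentry);
 *
 *      ... use parent->d_inode, parent->d_name ...
 *
 *      dput(parent);
 */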
extern void dput(struct dentry *);
static inline int d_mountpoint(struct dentry *dentry)
{
        return dentry->d_mounted;
}
extern struct vfsmount *lookup_mnt(struct path *);
extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
extern int sysctl_vfs_cache_pressure;
#endif /* __LINUX_DCACHE_H */