[PATCH] inotify (1/5): split kernel API from userspace support

The following series of patches introduces a kernel API for inotify,
making it possible for kernel modules to benefit from inotify's
mechanism for watching inodes.  With these patches, inotify will
maintain for each caller a list of watches (via an embedded struct
inotify_watch), where each inotify_watch is associated with a
corresponding struct inode.  The caller registers an event handler and
specifies for which filesystem events their event handler should be
called per inotify_watch.
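
For illustration only (not part of the patch; the my_* names are made
up), a minimal in-kernel consumer of this API would look roughly like
the sketch below, using the calls declared in include/linux/inotify.h
further down:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/inotify.h>

/* embed an inotify_watch in a private structure, as fs/inotify_user.c
 * does with struct inotify_user_watch */
struct my_watch {
	struct inotify_watch wdata;
	/* ... caller-private state ... */
};

static void my_handle_event(struct inotify_watch *w, u32 wd, u32 mask,
			    u32 cookie, const char *name)
{
	/* container_of(w, struct my_watch, wdata) recovers our state */
	pr_debug("inotify event 0x%x on wd %d (%s)\n", mask, wd,
		 name ? name : "");
}

static void my_destroy_watch(struct inotify_watch *w)
{
	kfree(container_of(w, struct my_watch, wdata));	/* final put */
}

static const struct inotify_operations my_ops = {
	.handle_event	= my_handle_event,
	.destroy_watch	= my_destroy_watch,
};

/*
 * ih = inotify_init(&my_ops);
 * wd = inotify_add_watch(ih, &watch->wdata, inode, IN_MODIFY);
 * ...
 * inotify_rm_wd(ih, wd);	(watch is freed via ->destroy_watch)
 * inotify_destroy(ih);
 */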

Signed-off-by: Amy Griffis <amy.griffis@hp.com>
Acked-by: Robert Love <rml@novell.com>
Acked-by: John McCutchan <john@johnmccutchan.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
Author:    Amy Griffis <amy.griffis@hp.com>, 2006-06-01 13:10:59 -07:00
Committer: Al Viro <viro@zeniv.linux.org.uk>
Commit:    2d9048e201 (parent 90204e0b7b)

 8 changed files with 1066 additions and 741 deletions

fs/Kconfig:
@@ -393,18 +393,30 @@ config INOTIFY
 	bool "Inotify file change notification support"
 	default y
 	---help---
-	  Say Y here to enable inotify support and the associated system
-	  calls.  Inotify is a file change notification system and a
-	  replacement for dnotify.  Inotify fixes numerous shortcomings in
-	  dnotify and introduces several new features.  It allows monitoring
-	  of both files and directories via a single open fd.  Other features
-	  include multiple file events, one-shot support, and unmount
+	  Say Y here to enable inotify support.  Inotify is a file change
+	  notification system and a replacement for dnotify.  Inotify fixes
+	  numerous shortcomings in dnotify and introduces several new features
+	  including multiple file events, one-shot support, and unmount
 	  notification.
 
 	  For more information, see Documentation/filesystems/inotify.txt
 
 	  If unsure, say Y.
 
+config INOTIFY_USER
+	bool "Inotify support for userspace"
+	depends on INOTIFY
+	default y
+	---help---
+	  Say Y here to enable inotify support for userspace, including the
+	  associated system calls.  Inotify allows monitoring of both files and
+	  directories via a single open fd.  Events are read from the file
+	  descriptor, which is also select()- and poll()-able.
+
+	  For more information, see Documentation/filesystems/inotify.txt
+
+	  If unsure, say Y.
+
 config QUOTA
 	bool "Quota support"
 	help
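
(Not part of the patch.)  The split makes the following configuration
possible, keeping the in-kernel API while compiling out the userspace
syscalls; a .config fragment:

CONFIG_INOTIFY=y
# CONFIG_INOTIFY_USER is not set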

fs/Makefile:

@@ -13,6 +13,7 @@ obj-y :=	open.o read_write.o file_table.o buffer.o bio.o super.o \
 		ioprio.o pnode.o drop_caches.o splice.o sync.o
 
 obj-$(CONFIG_INOTIFY)		+= inotify.o
+obj-$(CONFIG_INOTIFY_USER)	+= inotify_user.o
 obj-$(CONFIG_EPOLL)		+= eventpoll.o
 obj-$(CONFIG_COMPAT)		+= compat.o compat_ioctl.o

fs/inotify.c:

	(file diff suppressed because it is too large)

fs/inotify_user.c (new file, 717 lines):

@@ -0,0 +1,717 @@
/*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <asm/ioctls.h>
static kmem_cache_t *watch_cachep __read_mostly;
static kmem_cache_t *event_cachep __read_mostly;
static struct vfsmount *inotify_mnt __read_mostly;
/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances __read_mostly;
int inotify_max_user_watches __read_mostly;
int inotify_max_queued_events __read_mostly;
/*
* Lock ordering:
*
* inotify_dev->up_mutex (ensures we don't re-add the same watch)
* inode->inotify_mutex (protects inode's watch list)
* inotify_handle->mutex (protects inotify_handle's watch list)
* inotify_dev->ev_mutex (protects device's event queue)
*/
/*
* Lifetimes of the main data structures:
*
* inotify_device: Lifetime is managed by reference count, from
* sys_inotify_init() until release. Additional references can bump the count
* via get_inotify_dev() and drop the count via put_inotify_dev().
*
* inotify_user_watch: Lifetime is from create_watch() to the receipt of an
* IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
* first event, or to inotify_destroy().
*/
/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the mutexes 'ev_mutex' and 'up_mutex'.
 */
struct inotify_device {
wait_queue_head_t wq; /* wait queue for i/o */
struct mutex ev_mutex; /* protects event queue */
struct mutex up_mutex; /* synchronizes watch updates */
struct list_head events; /* list of queued events */
atomic_t count; /* reference count */
struct user_struct *user; /* user who opened this dev */
struct inotify_handle *ih; /* inotify handle */
unsigned int queue_size; /* size of the queue (bytes) */
unsigned int event_count; /* number of pending events */
unsigned int max_events; /* maximum number of events */
};
/*
* struct inotify_kernel_event - An inotify event, originating from a watch and
* queued for user-space. A list of these is attached to each instance of the
* device. In read(), this list is walked and all events that can fit in the
* buffer are returned.
*
* Protected by dev->ev_mutex of the device in which we are queued.
*/
struct inotify_kernel_event {
struct inotify_event event; /* the user-space event */
struct list_head list; /* entry in inotify_device's list */
char *name; /* filename, if any */
};
/*
* struct inotify_user_watch - our version of an inotify_watch, we add
* a reference to the associated inotify_device.
*/
struct inotify_user_watch {
struct inotify_device *dev; /* associated device */
struct inotify_watch wdata; /* inotify watch data */
};
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static int zero;
ctl_table inotify_table[] = {
{
.ctl_name = INOTIFY_MAX_USER_INSTANCES,
.procname = "max_user_instances",
.data = &inotify_max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
{
.ctl_name = INOTIFY_MAX_USER_WATCHES,
.procname = "max_user_watches",
.data = &inotify_max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
{
.ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
.procname = "max_queued_events",
.data = &inotify_max_queued_events,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
.strategy = &sysctl_intvec,
.extra1 = &zero
},
{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
static inline void get_inotify_dev(struct inotify_device *dev)
{
atomic_inc(&dev->count);
}
static inline void put_inotify_dev(struct inotify_device *dev)
{
if (atomic_dec_and_test(&dev->count)) {
atomic_dec(&dev->user->inotify_devs);
free_uid(dev->user);
kfree(dev);
}
}
/*
* free_inotify_user_watch - cleans up the watch and its references
*/
static void free_inotify_user_watch(struct inotify_watch *w)
{
struct inotify_user_watch *watch;
struct inotify_device *dev;
watch = container_of(w, struct inotify_user_watch, wdata);
dev = watch->dev;
atomic_dec(&dev->user->inotify_watches);
put_inotify_dev(dev);
kmem_cache_free(watch_cachep, watch);
}
/*
* kernel_event - create a new kernel event with the given parameters
*
* This function can sleep.
*/
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
const char *name)
{
struct inotify_kernel_event *kevent;
kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
if (unlikely(!kevent))
return NULL;
/* we hand this out to user-space, so zero it just in case */
memset(&kevent->event, 0, sizeof(struct inotify_event));
kevent->event.wd = wd;
kevent->event.mask = mask;
kevent->event.cookie = cookie;
INIT_LIST_HEAD(&kevent->list);
if (name) {
size_t len, rem, event_size = sizeof(struct inotify_event);
/*
* We need to pad the filename so as to properly align an
* array of inotify_event structures. Because the structure is
* small and the common case is a small filename, we just round
* up to the next multiple of the structure's sizeof. This is
* simple and safe for all architectures.
*/
len = strlen(name) + 1;
rem = event_size - len;
if (len > event_size) {
rem = event_size - (len % event_size);
if (len % event_size == 0)
rem = 0;
}
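/*
 * Worked example, with sizeof(struct inotify_event) == 16: name "foo"
 * gives len = 4, rem = 12, so event.len = 16; name "a_longer_filename"
 * gives len = 18, rem = 14, so event.len = 32.  event.len is always a
 * non-zero multiple of event_size.
 */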
kevent->name = kmalloc(len + rem, GFP_KERNEL);
if (unlikely(!kevent->name)) {
kmem_cache_free(event_cachep, kevent);
return NULL;
}
memcpy(kevent->name, name, len);
if (rem)
memset(kevent->name + len, 0, rem);
kevent->event.len = len + rem;
} else {
kevent->event.len = 0;
kevent->name = NULL;
}
return kevent;
}
/*
* inotify_dev_get_event - return the next event in the given dev's queue
*
* Caller must hold dev->ev_mutex.
*/
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
/*
* inotify_dev_queue_event - event handler registered with core inotify, adds
* a new event to the given device
*
* Can sleep (calls kernel_event()).
*/
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
u32 cookie, const char *name)
{
struct inotify_user_watch *watch;
struct inotify_device *dev;
struct inotify_kernel_event *kevent, *last;
watch = container_of(w, struct inotify_user_watch, wdata);
dev = watch->dev;
mutex_lock(&dev->ev_mutex);
/* we can safely put the watch as we don't reference it while
* generating the event
*/
if (mask & IN_IGNORED || mask & IN_ONESHOT)
put_inotify_watch(w); /* final put */
/* coalescing: drop this event if it is a dupe of the previous */
last = inotify_dev_get_event(dev);
if (last && last->event.mask == mask && last->event.wd == wd &&
last->event.cookie == cookie) {
const char *lastname = last->name;
if (!name && !lastname)
goto out;
if (name && lastname && !strcmp(lastname, name))
goto out;
}
/* the queue overflowed and we already sent the Q_OVERFLOW event */
if (unlikely(dev->event_count > dev->max_events))
goto out;
/* if the queue overflows, we need to notify user space */
if (unlikely(dev->event_count == dev->max_events))
kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
else
kevent = kernel_event(wd, mask, cookie, name);
if (unlikely(!kevent))
goto out;
/* queue the event and wake up anyone waiting */
dev->event_count++;
dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
list_add_tail(&kevent->list, &dev->events);
wake_up_interruptible(&dev->wq);
out:
mutex_unlock(&dev->ev_mutex);
}
/*
* remove_kevent - cleans up and ultimately frees the given kevent
*
* Caller must hold dev->ev_mutex.
*/
static void remove_kevent(struct inotify_device *dev,
struct inotify_kernel_event *kevent)
{
list_del(&kevent->list);
dev->event_count--;
dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
kfree(kevent->name);
kmem_cache_free(event_cachep, kevent);
}
/*
* inotify_dev_event_dequeue - destroy an event on the given device
*
* Caller must hold dev->ev_mutex.
*/
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
if (!list_empty(&dev->events)) {
struct inotify_kernel_event *kevent;
kevent = inotify_dev_get_event(dev);
remove_kevent(dev, kevent);
}
}
/*
* find_inode - resolve a user-given path to a specific inode and return the nd
*/
static int find_inode(const char __user *dirname, struct nameidata *nd,
unsigned flags)
{
int error;
error = __user_walk(dirname, flags, nd);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = vfs_permission(nd, MAY_READ);
if (error)
path_release(nd);
return error;
}
/*
* create_watch - creates a watch on the given device.
*
* Callers must hold dev->up_mutex.
*/
static int create_watch(struct inotify_device *dev, struct inode *inode,
u32 mask)
{
struct inotify_user_watch *watch;
int ret;
if (atomic_read(&dev->user->inotify_watches) >=
inotify_max_user_watches)
return -ENOSPC;
watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
if (unlikely(!watch))
return -ENOMEM;
/* save a reference to device and bump the count to make it official */
get_inotify_dev(dev);
watch->dev = dev;
atomic_inc(&dev->user->inotify_watches);
ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
if (ret < 0)
free_inotify_user_watch(&watch->wdata);
return ret;
}
/* Device Interface */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
struct inotify_device *dev = file->private_data;
int ret = 0;
poll_wait(file, &dev->wq, wait);
mutex_lock(&dev->ev_mutex);
if (!list_empty(&dev->events))
ret = POLLIN | POLLRDNORM;
mutex_unlock(&dev->ev_mutex);
return ret;
}
static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
size_t event_size = sizeof (struct inotify_event);
struct inotify_device *dev;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
dev = file->private_data;
while (1) {
int events;
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&dev->ev_mutex);
events = !list_empty(&dev->events);
mutex_unlock(&dev->ev_mutex);
if (events) {
ret = 0;
break;
}
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -EINTR;
break;
}
schedule();
}
finish_wait(&dev->wq, &wait);
if (ret)
return ret;
mutex_lock(&dev->ev_mutex);
while (1) {
struct inotify_kernel_event *kevent;
ret = buf - start;
if (list_empty(&dev->events))
break;
kevent = inotify_dev_get_event(dev);
if (event_size + kevent->event.len > count)
break;
if (copy_to_user(buf, &kevent->event, event_size)) {
ret = -EFAULT;
break;
}
buf += event_size;
count -= event_size;
if (kevent->name) {
if (copy_to_user(buf, kevent->name, kevent->event.len)){
ret = -EFAULT;
break;
}
buf += kevent->event.len;
count -= kevent->event.len;
}
remove_kevent(dev, kevent);
}
mutex_unlock(&dev->ev_mutex);
return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
struct inotify_device *dev = file->private_data;
inotify_destroy(dev->ih);
/* destroy all of the events on this device */
mutex_lock(&dev->ev_mutex);
while (!list_empty(&dev->events))
inotify_dev_event_dequeue(dev);
mutex_unlock(&dev->ev_mutex);
/* free this device: the put matching the get in inotify_init() */
put_inotify_dev(dev);
return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct inotify_device *dev;
void __user *p;
int ret = -ENOTTY;
dev = file->private_data;
p = (void __user *) arg;
switch (cmd) {
case FIONREAD:
ret = put_user(dev->queue_size, (int __user *) p);
break;
}
return ret;
}
static const struct file_operations inotify_fops = {
.poll = inotify_poll,
.read = inotify_read,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
};
static const struct inotify_operations inotify_user_ops = {
.handle_event = inotify_dev_queue_event,
.destroy_watch = free_inotify_user_watch,
};
asmlinkage long sys_inotify_init(void)
{
struct inotify_device *dev;
struct inotify_handle *ih;
struct user_struct *user;
struct file *filp;
int fd, ret;
fd = get_unused_fd();
if (fd < 0)
return fd;
filp = get_empty_filp();
if (!filp) {
ret = -ENFILE;
goto out_put_fd;
}
user = get_uid(current->user);
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
if (unlikely(!dev)) {
ret = -ENOMEM;
goto out_free_uid;
}
ih = inotify_init(&inotify_user_ops);
if (unlikely(IS_ERR(ih))) {
ret = PTR_ERR(ih);
goto out_free_dev;
}
dev->ih = ih;
filp->f_op = &inotify_fops;
filp->f_vfsmnt = mntget(inotify_mnt);
filp->f_dentry = dget(inotify_mnt->mnt_root);
filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
filp->f_mode = FMODE_READ;
filp->f_flags = O_RDONLY;
filp->private_data = dev;
INIT_LIST_HEAD(&dev->events);
init_waitqueue_head(&dev->wq);
mutex_init(&dev->ev_mutex);
mutex_init(&dev->up_mutex);
dev->event_count = 0;
dev->queue_size = 0;
dev->max_events = inotify_max_queued_events;
dev->user = user;
atomic_set(&dev->count, 0);
get_inotify_dev(dev);
atomic_inc(&user->inotify_devs);
fd_install(fd, filp);
return fd;
out_free_dev:
kfree(dev);
out_free_uid:
free_uid(user);
put_filp(filp);
out_put_fd:
put_unused_fd(fd);
return ret;
}
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
struct inode *inode;
struct inotify_device *dev;
struct nameidata nd;
struct file *filp;
int ret, fput_needed;
unsigned flags = 0;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
if (!(mask & IN_DONT_FOLLOW))
flags |= LOOKUP_FOLLOW;
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;
ret = find_inode(path, &nd, flags);
if (unlikely(ret))
goto fput_and_out;
/* inode held in place by reference to nd; dev by fget on fd */
inode = nd.dentry->d_inode;
dev = filp->private_data;
mutex_lock(&dev->up_mutex);
ret = inotify_find_update_watch(dev->ih, inode, mask);
if (ret == -ENOENT)
ret = create_watch(dev, inode, mask);
mutex_unlock(&dev->up_mutex);
path_release(&nd);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
}
asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
struct file *filp;
struct inotify_device *dev;
int ret, fput_needed;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto out;
}
dev = filp->private_data;
/* we free our watch data when we get IN_IGNORED */
ret = inotify_rm_wd(dev->ih, wd);
out:
fput_light(filp, fput_needed);
return ret;
}
static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
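/* 0xBAD1DEA is an arbitrary magic number for this internal mount */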
return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}
static struct file_system_type inotify_fs_type = {
.name = "inotifyfs",
.get_sb = inotify_get_sb,
.kill_sb = kill_anon_super,
};
/*
* inotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init inotify_user_setup(void)
{
int ret;
ret = register_filesystem(&inotify_fs_type);
if (unlikely(ret))
panic("inotify: register_filesystem returned %d!\n", ret);
inotify_mnt = kern_mount(&inotify_fs_type);
if (IS_ERR(inotify_mnt))
panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
inotify_max_user_watches = 8192;
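/*
 * these defaults are exported via /proc/sys/fs/inotify/
 * max_queued_events, max_user_instances and max_user_watches; see
 * inotify_table above and the CONFIG_INOTIFY_USER hooks in
 * kernel/sysctl.c below
 */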
watch_cachep = kmem_cache_create("inotify_watch_cache",
sizeof(struct inotify_user_watch),
0, SLAB_PANIC, NULL, NULL);
event_cachep = kmem_cache_create("inotify_event_cache",
sizeof(struct inotify_kernel_event),
0, SLAB_PANIC, NULL, NULL);
return 0;
}
module_init(inotify_user_setup);
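
For reference (not part of the patch), the userspace side of the event
queue implemented above reads as a stream of struct inotify_event
records, each followed by event->len bytes of name.  A minimal sketch,
assuming glibc wrappers for the new syscalls in <sys/inotify.h>:

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	ssize_t n, i;
	int fd = inotify_init();

	if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));	/* blocks until events are queued */
	for (i = 0; i + (ssize_t)sizeof(struct inotify_event) <= n; ) {
		struct inotify_event *ev = (struct inotify_event *)(buf + i);

		printf("wd=%d mask=%#x name=%s\n", ev->wd,
		       (unsigned)ev->mask, ev->len ? ev->name : "");
		i += sizeof(*ev) + ev->len;
	}
	close(fd);
	return 0;
}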

include/linux/inotify.h:

@@ -68,8 +68,37 @@ struct inotify_event {
 #include <linux/dcache.h>
 #include <linux/fs.h>
 
+/*
+ * struct inotify_watch - represents a watch request on a specific inode
+ *
+ * h_list is protected by ih->mutex of the associated inotify_handle.
+ * i_list, mask are protected by inode->inotify_mutex of the associated inode.
+ * ih, inode, and wd are never written to once the watch is created.
+ *
+ * Callers must use the established inotify interfaces to access inotify_watch
+ * contents.  The content of this structure is private to the inotify
+ * implementation.
+ */
+struct inotify_watch {
+	struct list_head	h_list;	/* entry in inotify_handle's list */
+	struct list_head	i_list;	/* entry in inode's list */
+	atomic_t		count;	/* reference count */
+	struct inotify_handle	*ih;	/* associated inotify handle */
+	struct inode		*inode;	/* associated inode */
+	__s32			wd;	/* watch descriptor */
+	__u32			mask;	/* event mask for this watch */
+};
+
+struct inotify_operations {
+	void (*handle_event)(struct inotify_watch *, u32, u32, u32,
+			     const char *);
+	void (*destroy_watch)(struct inotify_watch *);
+};
+
 #ifdef CONFIG_INOTIFY
 
+/* Kernel API for producing events */
+
 extern void inotify_d_instantiate(struct dentry *, struct inode *);
 extern void inotify_d_move(struct dentry *);
 extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
@@ -80,6 +109,18 @@ extern void inotify_unmount_inodes(struct list_head *);
 extern void inotify_inode_is_dead(struct inode *);
 extern u32 inotify_get_cookie(void);
 
+/* Kernel Consumer API */
+
+extern struct inotify_handle *inotify_init(const struct inotify_operations *);
+extern void inotify_destroy(struct inotify_handle *);
+extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *,
+				       u32);
+extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *,
+			       struct inode *, __u32);
+extern int inotify_rm_wd(struct inotify_handle *, __u32);
+extern void get_inotify_watch(struct inotify_watch *);
+extern void put_inotify_watch(struct inotify_watch *);
+
 #else
 
 static inline void inotify_d_instantiate(struct dentry *dentry,
@@ -116,6 +157,41 @@ static inline u32 inotify_get_cookie(void)
 	return 0;
 }
 
+static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void inotify_destroy(struct inotify_handle *ih)
+{
+}
+
+static inline __s32 inotify_find_update_watch(struct inotify_handle *ih,
+					      struct inode *inode, u32 mask)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline __s32 inotify_add_watch(struct inotify_handle *ih,
+				      struct inotify_watch *watch,
+				      struct inode *inode, __u32 mask)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void get_inotify_watch(struct inotify_watch *watch)
+{
+}
+
+static inline void put_inotify_watch(struct inotify_watch *watch)
+{
+}
+
 #endif	/* CONFIG_INOTIFY */
 
 #endif	/* __KERNEL__ */
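
A consequence of the stubs above (a sketch, with my_ops as in the
earlier example): a consumer can call the API unconditionally and handle
the CONFIG_INOTIFY=n case at runtime:

	struct inotify_handle *ih = inotify_init(&my_ops);

	if (IS_ERR(ih))
		return PTR_ERR(ih);	/* -EOPNOTSUPP when CONFIG_INOTIFY=n */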

include/linux/sched.h:

@@ -494,7 +494,7 @@ struct user_struct {
 	atomic_t processes;	/* How many processes does this user have? */
 	atomic_t files;		/* How many open files does this user have? */
 	atomic_t sigpending;	/* How many pending signals does this user have? */
-#ifdef CONFIG_INOTIFY
+#ifdef CONFIG_INOTIFY_USER
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif

kernel/sysctl.c:

@@ -150,7 +150,7 @@ extern ctl_table random_table[];
 #ifdef CONFIG_UNIX98_PTYS
 extern ctl_table pty_table[];
 #endif
-#ifdef CONFIG_INOTIFY
+#ifdef CONFIG_INOTIFY_USER
 extern ctl_table inotify_table[];
 #endif
 
@@ -1028,7 +1028,7 @@ static ctl_table fs_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_doulongvec_minmax,
 	},
-#ifdef CONFIG_INOTIFY
+#ifdef CONFIG_INOTIFY_USER
 	{
 		.ctl_name	= FS_INOTIFY,
 		.procname	= "inotify",
kernel/user.c:

@@ -140,7 +140,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		atomic_set(&new->processes, 0);
 		atomic_set(&new->files, 0);
 		atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY
+#ifdef CONFIG_INOTIFY_USER
 		atomic_set(&new->inotify_watches, 0);
 		atomic_set(&new->inotify_devs, 0);
 #endif