inotify: remove inotify in kernel interface

nothing uses inotify in the kernel, drop it!

Signed-off-by: Eric Paris <eparis@redhat.com>
Author: Eric Paris
Date:   2009-12-17 20:30:52 -05:00
Parent: 7050c48826
Commit: 2dfc1cae4c
10 changed files with 4 additions and 1130 deletions
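
For context, this is roughly what a consumer of the interface being deleted looked like: a minimal sketch reconstructed from the declarations removed in include/linux/inotify.h below. All example_* identifiers are invented, and error handling is abbreviated.

/*
 * Hypothetical out-of-tree consumer of the legacy in-kernel inotify API.
 * Signatures match the removed declarations; everything named example_*
 * is illustrative only.
 */
#include <linux/inotify.h>
#include <linux/err.h>
#include <linux/slab.h>

static struct inotify_handle *example_ih;

/*
 * Called with watch->ih->mutex and the watched inode's inotify_mutex held,
 * so it must not call back into inotify interfaces that take those locks.
 */
static void example_handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
				 u32 cookie, const char *name,
				 struct inode *n_inode)
{
	if (mask & IN_IGNORED)
		put_inotify_watch(watch);	/* caller owns the final put */
}

static void example_destroy_watch(struct inotify_watch *watch)
{
	kfree(watch);	/* we allocated it in example_watch_inode() */
}

static const struct inotify_operations example_ops = {
	.handle_event	= example_handle_event,
	.destroy_watch	= example_destroy_watch,
};

/* Caller must already hold a reference pinning @inode (e.g. via path lookup). */
static int example_watch_inode(struct inode *inode)
{
	struct inotify_watch *watch;
	s32 wd;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (!watch)
		return -ENOMEM;
	inotify_init_watch(watch);	/* takes the initial reference */
	wd = inotify_add_watch(example_ih, watch, inode,
			       IN_MODIFY | IN_DELETE_SELF);
	if (wd < 0) {
		kfree(watch);	/* never installed; no put needed */
		return wd;
	}
	return 0;
}

static int __init example_init(void)
{
	example_ih = inotify_init(&example_ops);
	return IS_ERR(example_ih) ? PTR_ERR(example_ih) : 0;
}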

Documentation/feature-removal-schedule.txt

@@ -367,14 +367,6 @@ When: 2.6.33
 Why: Should be implemented in userspace, policy daemon.
 Who: Johannes Berg <johannes@sipsolutions.net>
----------------------------
-What: CONFIG_INOTIFY
-When: 2.6.33
-Why: last user (audit) will be converted to the newer more generic
-     and more easily maintained fsnotify subsystem
-Who: Eric Paris <eparis@redhat.com>
 ----------------------------
 What: lock_policy_rwsem_* and unlock_policy_rwsem_* will not be

fs/inode.c

@@ -20,7 +20,6 @@
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
 #include <linux/bootmem.h>
-#include <linux/inotify.h>
 #include <linux/fsnotify.h>
 #include <linux/mount.h>
 #include <linux/async.h>
@@ -264,10 +263,6 @@ void inode_init_once(struct inode *inode)
 	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
 	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
 	i_size_ordered_init(inode);
-#ifdef CONFIG_INOTIFY
-	INIT_LIST_HEAD(&inode->inotify_watches);
-	mutex_init(&inode->inotify_mutex);
-#endif
 #ifdef CONFIG_FSNOTIFY
 	INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
 #endif
@@ -413,7 +408,6 @@ int invalidate_inodes(struct super_block *sb)
 	down_write(&iprune_sem);
 	spin_lock(&inode_lock);
-	inotify_unmount_inodes(&sb->s_inodes);
 	fsnotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);

fs/notify/inotify/Kconfig

@@ -1,18 +1,3 @@
-config INOTIFY
-	bool "Inotify file change notification support"
-	default n
-	---help---
-	  Say Y here to enable legacy in kernel inotify support. Inotify is a
-	  file change notification system. It is a replacement for dnotify.
-	  This option only provides the legacy inotify in kernel API. There
-	  are no in tree kernel users of this interface since it is deprecated.
-	  You only need this if you are loading an out of tree kernel module
-	  that uses inotify.
-
-	  For more information, see <file:Documentation/filesystems/inotify.txt>
-
-	  If unsure, say N.
-
 config INOTIFY_USER
 	bool "Inotify support for userspace"
 	select ANON_INODES

fs/notify/inotify/Makefile

@@ -1,2 +1 @@
-obj-$(CONFIG_INOTIFY)		+= inotify.o
 obj-$(CONFIG_INOTIFY_USER)	+= inotify_fsnotify.o inotify_user.o

fs/notify/inotify/inotify.c (deleted file)

@@ -1,873 +0,0 @@
/*
* fs/inotify.c - inode-based file event notifications
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Kernel API added by: Amy Griffis <amy.griffis@hp.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
static atomic_t inotify_cookie;
/*
* Lock ordering:
*
* dentry->d_lock (used to keep d_move() away from dentry->d_parent)
* iprune_mutex (synchronize shrink_icache_memory())
* inode_lock (protects the super_block->s_inodes list)
* inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
* inotify_handle->mutex (protects inotify_handle and watches->h_list)
*
* The inode->inotify_mutex and inotify_handle->mutex are held during execution
* of a caller's event handler. Thus, the caller must not hold any locks
* taken in their event handler while calling any of the published inotify
* interfaces.
*/
/*
* Lifetimes of the three main data structures--inotify_handle, inode, and
* inotify_watch--are managed by reference count.
*
* inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
* Additional references can bump the count via get_inotify_handle() and drop
* the count via put_inotify_handle().
*
* inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
* to remove_watch_no_event(). Additional references can bump the count via
* get_inotify_watch() and drop the count via put_inotify_watch(). The caller
* is responsible for the final put after receiving IN_IGNORED, or when using
* IN_ONESHOT after receiving the first event. Inotify does the final put if
* inotify_destroy() is called.
*
* inode: Pinned so long as the inode is associated with a watch, from
* inotify_add_watch() to the final put_inotify_watch().
*/
/*
* struct inotify_handle - represents an inotify instance
*
* This structure is protected by the mutex 'mutex'.
*/
struct inotify_handle {
struct idr idr; /* idr mapping wd -> watch */
struct mutex mutex; /* protects this bad boy */
struct list_head watches; /* list of watches */
atomic_t count; /* reference count */
u32 last_wd; /* the last wd allocated */
const struct inotify_operations *in_ops; /* inotify caller operations */
};
static inline void get_inotify_handle(struct inotify_handle *ih)
{
atomic_inc(&ih->count);
}
static inline void put_inotify_handle(struct inotify_handle *ih)
{
if (atomic_dec_and_test(&ih->count)) {
idr_destroy(&ih->idr);
kfree(ih);
}
}
/**
* get_inotify_watch - grab a reference to an inotify_watch
* @watch: watch to grab
*/
void get_inotify_watch(struct inotify_watch *watch)
{
atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);
int pin_inotify_watch(struct inotify_watch *watch)
{
struct super_block *sb = watch->inode->i_sb;
if (atomic_inc_not_zero(&sb->s_active)) {
atomic_inc(&watch->count);
return 1;
}
return 0;
}
/**
* put_inotify_watch - decrements the ref count on a given watch. cleans up
* watch references if the count reaches zero. inotify_watch is freed by
* inotify callers via the destroy_watch() op.
* @watch: watch to release
*/
void put_inotify_watch(struct inotify_watch *watch)
{
if (atomic_dec_and_test(&watch->count)) {
struct inotify_handle *ih = watch->ih;
iput(watch->inode);
ih->in_ops->destroy_watch(watch);
put_inotify_handle(ih);
}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);
void unpin_inotify_watch(struct inotify_watch *watch)
{
struct super_block *sb = watch->inode->i_sb;
put_inotify_watch(watch);
deactivate_super(sb);
}
/*
* inotify_handle_get_wd - returns the next WD for use by the given handle
*
* Callers must hold ih->mutex. This function can sleep.
*/
static int inotify_handle_get_wd(struct inotify_handle *ih,
struct inotify_watch *watch)
{
int ret;
do {
if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
return -ENOSPC;
ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
} while (ret == -EAGAIN);
if (likely(!ret))
ih->last_wd = watch->wd;
return ret;
}
/*
* inotify_inode_watched - returns nonzero if there are watches on this inode
* and zero otherwise. We call this lockless, we do not care if we race.
*/
static inline int inotify_inode_watched(struct inode *inode)
{
return !list_empty(&inode->inotify_watches);
}
/*
* Get child dentry flag into synch with parent inode.
* Flag should always be clear for negative dentries.
*/
static void set_dentry_child_flags(struct inode *inode, int watched)
{
struct dentry *alias;
spin_lock(&dcache_lock);
list_for_each_entry(alias, &inode->i_dentry, d_alias) {
struct dentry *child;
list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
if (!child->d_inode)
continue;
spin_lock(&child->d_lock);
if (watched)
child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
else
child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
spin_unlock(&child->d_lock);
}
}
spin_unlock(&dcache_lock);
}
/*
* inode_find_handle - find the watch associated with the given inode and
* handle
*
* Callers must hold inode->inotify_mutex.
*/
static struct inotify_watch *inode_find_handle(struct inode *inode,
struct inotify_handle *ih)
{
struct inotify_watch *watch;
list_for_each_entry(watch, &inode->inotify_watches, i_list) {
if (watch->ih == ih)
return watch;
}
return NULL;
}
/*
* remove_watch_no_event - remove watch without the IN_IGNORED event.
*
* Callers must hold both inode->inotify_mutex and ih->mutex.
*/
static void remove_watch_no_event(struct inotify_watch *watch,
struct inotify_handle *ih)
{
list_del(&watch->i_list);
list_del(&watch->h_list);
if (!inotify_inode_watched(watch->inode))
set_dentry_child_flags(watch->inode, 0);
idr_remove(&ih->idr, watch->wd);
}
/**
* inotify_remove_watch_locked - Remove a watch from both the handle and the
* inode. Sends the IN_IGNORED event signifying that the inode is no longer
* watched. May be invoked from a caller's event handler.
* @ih: inotify handle associated with watch
* @watch: watch to remove
*
* Callers must hold both inode->inotify_mutex and ih->mutex.
*/
void inotify_remove_watch_locked(struct inotify_handle *ih,
struct inotify_watch *watch)
{
remove_watch_no_event(watch, ih);
ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
/* Kernel API for producing events */
/*
* inotify_d_instantiate - instantiate dcache entry for inode
*/
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
struct dentry *parent;
if (!inode)
return;
spin_lock(&entry->d_lock);
parent = entry->d_parent;
if (parent->d_inode && inotify_inode_watched(parent->d_inode))
entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
spin_unlock(&entry->d_lock);
}
/*
* inotify_d_move - dcache entry has been moved
*/
void inotify_d_move(struct dentry *entry)
{
struct dentry *parent;
parent = entry->d_parent;
if (inotify_inode_watched(parent->d_inode))
entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
else
entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}
/**
* inotify_inode_queue_event - queue an event to all watches on this inode
* @inode: inode event is originating from
* @mask: event mask describing this event
* @cookie: cookie for synchronization, or zero
* @name: filename, if any
* @n_inode: inode associated with name
*/
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
const char *name, struct inode *n_inode)
{
struct inotify_watch *watch, *next;
if (!inotify_inode_watched(inode))
return;
mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
u32 watch_mask = watch->mask;
if (watch_mask & mask) {
struct inotify_handle *ih = watch->ih;
mutex_lock(&ih->mutex);
if (watch_mask & IN_ONESHOT)
remove_watch_no_event(watch, ih);
ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
name, n_inode);
mutex_unlock(&ih->mutex);
}
}
mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
/**
* inotify_dentry_parent_queue_event - queue an event to a dentry's parent
* @dentry: the dentry in question, we queue against this dentry's parent
* @mask: event mask describing this event
* @cookie: cookie for synchronization, or zero
* @name: filename, if any
*/
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
u32 cookie, const char *name)
{
struct dentry *parent;
struct inode *inode;
if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
return;
spin_lock(&dentry->d_lock);
parent = dentry->d_parent;
inode = parent->d_inode;
if (inotify_inode_watched(inode)) {
dget(parent);
spin_unlock(&dentry->d_lock);
inotify_inode_queue_event(inode, mask, cookie, name,
dentry->d_inode);
dput(parent);
} else
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
/**
* inotify_get_cookie - return a unique cookie for use in synchronizing events.
*/
u32 inotify_get_cookie(void)
{
return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
/**
* inotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @list: list of inodes being unmounted (sb->s_inodes)
*
* Called with inode_lock held, protecting the unmounting super block's list
* of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
* We temporarily drop inode_lock, however, and CAN block.
*/
void inotify_unmount_inodes(struct list_head *list)
{
struct inode *inode, *next_i, *need_iput = NULL;
list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
struct inotify_watch *watch, *next_w;
struct inode *need_iput_tmp;
struct list_head *watches;
/*
* We cannot __iget() an inode in state I_CLEAR, I_FREEING,
* I_WILL_FREE, or I_NEW which is fine because by that point
* the inode cannot have any associated watches.
*/
if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
continue;
/*
* If i_count is zero, the inode cannot have any watches and
* doing an __iget/iput with MS_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
if (!atomic_read(&inode->i_count))
continue;
need_iput_tmp = need_iput;
need_iput = NULL;
/* In case inotify_remove_watch_locked() drops a reference. */
if (inode != need_iput_tmp)
__iget(inode);
else
need_iput_tmp = NULL;
/* In case the dropping of a reference would nuke next_i. */
if ((&next_i->i_sb_list != list) &&
atomic_read(&next_i->i_count) &&
!(next_i->i_state & (I_CLEAR | I_FREEING |
I_WILL_FREE))) {
__iget(next_i);
need_iput = next_i;
}
/*
* We can safely drop inode_lock here because we hold
* references on both inode and next_i. Also no new inodes
* will be added since the umount has begun. Finally,
* iprune_mutex keeps shrink_icache_memory() away.
*/
spin_unlock(&inode_lock);
if (need_iput_tmp)
iput(need_iput_tmp);
/* for each watch, send IN_UNMOUNT and then remove it */
mutex_lock(&inode->inotify_mutex);
watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) {
struct inotify_handle *ih = watch->ih;
get_inotify_watch(watch);
mutex_lock(&ih->mutex);
ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
NULL, NULL);
inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
put_inotify_watch(watch);
}
mutex_unlock(&inode->inotify_mutex);
iput(inode);
spin_lock(&inode_lock);
}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
/**
* inotify_inode_is_dead - an inode has been deleted, cleanup any watches
* @inode: inode that is about to be removed
*/
void inotify_inode_is_dead(struct inode *inode)
{
struct inotify_watch *watch, *next;
mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
struct inotify_handle *ih = watch->ih;
mutex_lock(&ih->mutex);
inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
}
mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
/* Kernel Consumer API */
/**
* inotify_init - allocate and initialize an inotify instance
* @ops: caller's inotify operations
*/
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
struct inotify_handle *ih;
ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
if (unlikely(!ih))
return ERR_PTR(-ENOMEM);
idr_init(&ih->idr);
INIT_LIST_HEAD(&ih->watches);
mutex_init(&ih->mutex);
ih->last_wd = 0;
ih->in_ops = ops;
atomic_set(&ih->count, 0);
get_inotify_handle(ih);
return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);
/**
* inotify_init_watch - initialize an inotify watch
* @watch: watch to initialize
*/
void inotify_init_watch(struct inotify_watch *watch)
{
INIT_LIST_HEAD(&watch->h_list);
INIT_LIST_HEAD(&watch->i_list);
atomic_set(&watch->count, 0);
get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);
/*
* Watch removals suck violently. To kick the watch out we need (in this
* order) inode->inotify_mutex and ih->mutex. That's fine if we have
* a hold on inode; however, for all other cases we need to make damn sure
* we don't race with umount. We can *NOT* just grab a reference to a
* watch - inotify_unmount_inodes() will happily sail past it and we'll end
* up with a reference to an inode potentially outliving its superblock.
* Ideally we just want to grab an active reference to the superblock if we
* can; that will make sure we won't go into inotify_unmount_inodes() until we are
* done. Cleanup is just deactivate_super(). However, that leaves a messy
* case - what if we *are* racing with umount() and active references to
* superblock can't be acquired anymore? We can bump ->s_count, grab
* ->s_umount, which will wait until the superblock is shut down and the
* watch in question is pining for fjords.
*
* And yes, this is far beyond mere "not very pretty"; so's the entire
* concept of inotify to start with.
*/
/**
* pin_to_kill - pin the watch down for removal
* @ih: inotify handle
* @watch: watch to kill
*
* Called with ih->mutex held, drops it. Possible return values:
* 0 - nothing to do, it has died
* 1 - remove it, drop the reference and deactivate_super()
*/
static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
{
struct super_block *sb = watch->inode->i_sb;
if (atomic_inc_not_zero(&sb->s_active)) {
get_inotify_watch(watch);
mutex_unlock(&ih->mutex);
return 1; /* the best outcome */
}
spin_lock(&sb_lock);
sb->s_count++;
spin_unlock(&sb_lock);
mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
down_read(&sb->s_umount);
/* fs is already shut down; the watch is dead */
drop_super(sb);
return 0;
}
static void unpin_and_kill(struct inotify_watch *watch)
{
struct super_block *sb = watch->inode->i_sb;
put_inotify_watch(watch);
deactivate_super(sb);
}
/**
* inotify_destroy - clean up and destroy an inotify instance
* @ih: inotify handle
*/
void inotify_destroy(struct inotify_handle *ih)
{
/*
* Destroy all of the watches for this handle. Unfortunately, not very
* pretty. We cannot do a simple iteration over the list, because we
* do not know the inode until we iterate to the watch. But we need to
* hold inode->inotify_mutex before ih->mutex. The following works.
*
* AV: it had to become even uglier to start working ;-/
*/
while (1) {
struct inotify_watch *watch;
struct list_head *watches;
struct super_block *sb;
struct inode *inode;
mutex_lock(&ih->mutex);
watches = &ih->watches;
if (list_empty(watches)) {
mutex_unlock(&ih->mutex);
break;
}
watch = list_first_entry(watches, struct inotify_watch, h_list);
sb = watch->inode->i_sb;
if (!pin_to_kill(ih, watch))
continue;
inode = watch->inode;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
/* make sure we didn't race with another list removal */
if (likely(idr_find(&ih->idr, watch->wd))) {
remove_watch_no_event(watch, ih);
put_inotify_watch(watch);
}
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
unpin_and_kill(watch);
}
/* free this handle: the put matching the get in inotify_init() */
put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);
/**
* inotify_find_watch - find an existing watch for an (ih,inode) pair
* @ih: inotify handle
* @inode: inode to watch
* @watchp: pointer to existing inotify_watch
*
* Caller must pin given inode (via nameidata).
*/
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
struct inotify_watch **watchp)
{
struct inotify_watch *old;
int ret = -ENOENT;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
old = inode_find_handle(inode, ih);
if (unlikely(old)) {
get_inotify_watch(old); /* caller must put watch */
*watchp = old;
ret = old->wd;
}
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);
/**
* inotify_find_update_watch - find and update the mask of an existing watch
* @ih: inotify handle
* @inode: inode's watch to update
* @mask: mask of events to watch
*
* Caller must pin given inode (via nameidata).
*/
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
u32 mask)
{
struct inotify_watch *old;
int mask_add = 0;
int ret;
if (mask & IN_MASK_ADD)
mask_add = 1;
/* don't allow invalid bits: we don't want flags set */
mask &= IN_ALL_EVENTS | IN_ONESHOT;
if (unlikely(!mask))
return -EINVAL;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
/*
* Handle the case of re-adding a watch on an (inode,ih) pair that we
* are already watching. We just update the mask and return its wd.
*/
old = inode_find_handle(inode, ih);
if (unlikely(!old)) {
ret = -ENOENT;
goto out;
}
if (mask_add)
old->mask |= mask;
else
old->mask = mask;
ret = old->wd;
out:
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);
/**
* inotify_add_watch - add a watch to an inotify instance
* @ih: inotify handle
* @watch: caller allocated watch structure
* @inode: inode to watch
* @mask: mask of events to watch
*
* Caller must pin given inode (via nameidata).
* Caller must ensure it only calls inotify_add_watch() once per watch.
* Calls inotify_handle_get_wd() so may sleep.
*/
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
struct inode *inode, u32 mask)
{
int ret = 0;
int newly_watched;
/* don't allow invalid bits: we don't want flags set */
mask &= IN_ALL_EVENTS | IN_ONESHOT;
if (unlikely(!mask))
return -EINVAL;
watch->mask = mask;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
/* Initialize a new watch */
ret = inotify_handle_get_wd(ih, watch);
if (unlikely(ret))
goto out;
ret = watch->wd;
/* save a reference to handle and bump the count to make it official */
get_inotify_handle(ih);
watch->ih = ih;
/*
* Save a reference to the inode and bump the ref count to make it
* official. We hold a reference to nameidata, which makes this safe.
*/
watch->inode = igrab(inode);
/* Add the watch to the handle's and the inode's list */
newly_watched = !inotify_inode_watched(inode);
list_add(&watch->h_list, &ih->watches);
list_add(&watch->i_list, &inode->inotify_watches);
/*
* Set child flags _after_ adding the watch, so there is no race
* window where newly instantiated children could miss their parent's
* watched flag.
*/
if (newly_watched)
set_dentry_child_flags(inode, 1);
out:
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);
/**
* inotify_clone_watch - put the watch next to existing one
* @old: already installed watch
* @new: new watch
*
* Caller must hold the inotify_mutex of inode we are dealing with;
* it is expected to remove the old watch before unlocking the inode.
*/
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
struct inotify_handle *ih = old->ih;
int ret = 0;
new->mask = old->mask;
new->ih = ih;
mutex_lock(&ih->mutex);
/* Initialize a new watch */
ret = inotify_handle_get_wd(ih, new);
if (unlikely(ret))
goto out;
ret = new->wd;
get_inotify_handle(ih);
new->inode = igrab(old->inode);
list_add(&new->h_list, &ih->watches);
list_add(&new->i_list, &old->inode->inotify_watches);
out:
mutex_unlock(&ih->mutex);
return ret;
}
void inotify_evict_watch(struct inotify_watch *watch)
{
get_inotify_watch(watch);
mutex_lock(&watch->ih->mutex);
inotify_remove_watch_locked(watch->ih, watch);
mutex_unlock(&watch->ih->mutex);
}
/**
* inotify_rm_wd - remove a watch from an inotify instance
* @ih: inotify handle
* @wd: watch descriptor to remove
*
* Can sleep.
*/
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
struct inotify_watch *watch;
struct super_block *sb;
struct inode *inode;
mutex_lock(&ih->mutex);
watch = idr_find(&ih->idr, wd);
if (unlikely(!watch)) {
mutex_unlock(&ih->mutex);
return -EINVAL;
}
sb = watch->inode->i_sb;
if (!pin_to_kill(ih, watch))
return 0;
inode = watch->inode;
mutex_lock(&inode->inotify_mutex);
mutex_lock(&ih->mutex);
/* make sure that we did not race */
if (likely(idr_find(&ih->idr, wd) == watch))
inotify_remove_watch_locked(ih, watch);
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
unpin_and_kill(watch);
return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);
/**
* inotify_rm_watch - remove a watch from an inotify instance
* @ih: inotify handle
* @watch: watch to remove
*
* Can sleep.
*/
int inotify_rm_watch(struct inotify_handle *ih,
struct inotify_watch *watch)
{
return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);
/*
* inotify_setup - core initialization function
*/
static int __init inotify_setup(void)
{
BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(IN_OPEN != FS_OPEN);
BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
BUILD_BUG_ON(IN_CREATE != FS_CREATE);
BUILD_BUG_ON(IN_DELETE != FS_DELETE);
BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
atomic_set(&inotify_cookie, 0);
return 0;
}
module_init(inotify_setup);
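
To close the loop on the lifetime rules spelled out in the comments above (handle lives from inotify_init() to inotify_destroy(); inotify itself does the final put on watches it tears down), here is a hypothetical consumer's shutdown, continuing the example_* sketch from the top of this commit:

static void __exit example_exit(void)
{
	/*
	 * inotify_destroy() walks ih->watches, removes each remaining watch
	 * without queueing IN_IGNORED, and does the final put_inotify_watch()
	 * itself (which in turn invokes our destroy_watch op), then drops the
	 * handle reference taken in inotify_init().
	 */
	inotify_destroy(example_ih);
}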

fs/open.c

@@ -29,6 +29,7 @@
 #include <linux/falloc.h>
 #include <linux/fs_struct.h>
 #include <linux/ima.h>
+#include <linux/dnotify.h>

 #include "internal.h"

include/linux/fs.h

@@ -771,11 +771,6 @@ struct inode {
 	struct hlist_head	i_fsnotify_mark_entries; /* fsnotify mark entries */
 #endif

-#ifdef CONFIG_INOTIFY
-	struct list_head	inotify_watches; /* watches on this inode */
-	struct mutex		inotify_mutex;	/* protects the watches list */
-#endif
-
 	unsigned long		i_state;
 	unsigned long		dirtied_when;	/* jiffies of first dirtying */

include/linux/fsnotify.h

@@ -11,8 +11,6 @@
  * (C) Copyright 2005 Robert Love
  */

-#include <linux/dnotify.h>
-#include <linux/inotify.h>
 #include <linux/fsnotify_backend.h>
 #include <linux/audit.h>
 #include <linux/slab.h>
@@ -25,16 +23,12 @@ static inline void fsnotify_d_instantiate(struct dentry *entry,
 					  struct inode *inode)
 {
 	__fsnotify_d_instantiate(entry, inode);
-	inotify_d_instantiate(entry, inode);
 }

 /* Notify this dentry's parent about a child's events. */
 static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
 {
 	__fsnotify_parent(dentry, mask);
-	inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
 }

 /*
@@ -48,8 +42,6 @@ static inline void fsnotify_d_move(struct dentry *entry)
 	 * cares about events from this entry.
 	 */
 	__fsnotify_update_dcache_flags(entry);
-	inotify_d_move(entry);
 }

 /*
@@ -57,8 +49,6 @@ static inline void fsnotify_d_move(struct dentry *entry)
  */
 static inline void fsnotify_link_count(struct inode *inode)
 {
-	inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
 	fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }

@@ -70,7 +60,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
 				 int isdir, struct inode *target, struct dentry *moved)
 {
 	struct inode *source = moved->d_inode;
-	u32 in_cookie = inotify_get_cookie();
 	u32 fs_cookie = fsnotify_get_cookie();
 	__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
 	__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
@@ -80,31 +69,18 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
 		old_dir_mask |= FS_DN_RENAME;

 	if (isdir) {
-		isdir = IN_ISDIR;
 		old_dir_mask |= FS_IN_ISDIR;
 		new_dir_mask |= FS_IN_ISDIR;
 	}

-	inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
-				  source);
-	inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
-				  source);
-
 	fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
 	fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);

-	if (target) {
-		inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
-		inotify_inode_is_dead(target);
-
-		/* this is really a link_count change not a removal */
+	if (target)
 		fsnotify_link_count(target);
-	}

-	if (source) {
-		inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
+	if (source)
 		fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
-	}
 	audit_inode_child(moved, new_dir);
 }

@@ -134,9 +110,6 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
  */
 static inline void fsnotify_inoderemove(struct inode *inode)
 {
-	inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
-	inotify_inode_is_dead(inode);
-
 	fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 	__fsnotify_inode_delete(inode);
 }

@@ -146,8 +119,6 @@ static inline void fsnotify_inoderemove(struct inode *inode)
  */
 static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
 {
-	inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
-				  dentry->d_inode);
 	audit_inode_child(dentry, inode);

 	fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@@ -160,8 +131,6 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
  */
 static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
 {
-	inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
-				  inode);
 	fsnotify_link_count(inode);
 	audit_inode_child(new_dentry, dir);

@@ -176,7 +145,6 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
 	__u32 mask = (FS_CREATE | FS_IN_ISDIR);
 	struct inode *d_inode = dentry->d_inode;

-	inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
 	audit_inode_child(dentry, inode);

 	fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@@ -193,8 +161,6 @@ static inline void fsnotify_access(struct dentry *dentry)
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_IN_ISDIR;

-	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
 	fsnotify_parent(dentry, mask);
 	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
@@ -210,8 +176,6 @@ static inline void fsnotify_modify(struct dentry *dentry)
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_IN_ISDIR;

-	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
 	fsnotify_parent(dentry, mask);
 	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
@@ -227,8 +191,6 @@ static inline void fsnotify_open(struct dentry *dentry)
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_IN_ISDIR;

-	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
 	fsnotify_parent(dentry, mask);
 	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
@@ -246,8 +208,6 @@ static inline void fsnotify_close(struct file *file)
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_IN_ISDIR;

-	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
 	fsnotify_parent(dentry, mask);
 	fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
 }
@@ -263,8 +223,6 @@ static inline void fsnotify_xattr(struct dentry *dentry)
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_IN_ISDIR;

-	inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
 	fsnotify_parent(dentry, mask);
 	fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 }
@@ -299,14 +257,12 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
 	if (mask) {
 		if (S_ISDIR(inode->i_mode))
 			mask |= FS_IN_ISDIR;
-		inotify_inode_queue_event(inode, mask, 0, NULL, NULL);

 		fsnotify_parent(dentry, mask);
 		fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 	}
 }

-#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY)	/* notify helpers */
+#if defined(CONFIG_FSNOTIFY)	/* notify helpers */

 /*
  * fsnotify_oldname_init - save off the old filename before we change it
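
The net effect of the hunks above is mechanical: every VFS helper used to feed two notification paths and now feeds one. Schematically, using fsnotify_access() as the template (a paraphrase of the change, not a verbatim hunk):

/* before: each hook queued a legacy in-kernel inotify event and an
 * fsnotify event */
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

/* after: fsnotify() alone; userspace inotify (CONFIG_INOTIFY_USER) is
 * itself implemented as an fsnotify backend, so it still sees these
 * events */
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);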

include/linux/inotify.h

@@ -69,178 +69,4 @@ struct inotify_event {
 #define IN_CLOEXEC O_CLOEXEC
 #define IN_NONBLOCK O_NONBLOCK

-#ifdef __KERNEL__
-
-#include <linux/dcache.h>
-#include <linux/fs.h>
-
-/*
- * struct inotify_watch - represents a watch request on a specific inode
- *
- * h_list is protected by ih->mutex of the associated inotify_handle.
- * i_list, mask are protected by inode->inotify_mutex of the associated inode.
- * ih, inode, and wd are never written to once the watch is created.
- *
- * Callers must use the established inotify interfaces to access inotify_watch
- * contents. The content of this structure is private to the inotify
- * implementation.
- */
-struct inotify_watch {
-	struct list_head	h_list;	/* entry in inotify_handle's list */
-	struct list_head	i_list;	/* entry in inode's list */
-	atomic_t		count;	/* reference count */
-	struct inotify_handle	*ih;	/* associated inotify handle */
-	struct inode		*inode;	/* associated inode */
-	__s32			wd;	/* watch descriptor */
-	__u32			mask;	/* event mask for this watch */
-};
-
-struct inotify_operations {
-	void (*handle_event)(struct inotify_watch *, u32, u32, u32,
-			     const char *, struct inode *);
-	void (*destroy_watch)(struct inotify_watch *);
-};
-
-#ifdef CONFIG_INOTIFY
-
-/* Kernel API for producing events */
-extern void inotify_d_instantiate(struct dentry *, struct inode *);
-extern void inotify_d_move(struct dentry *);
-extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
-				      const char *, struct inode *);
-extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
-					      const char *);
-extern void inotify_unmount_inodes(struct list_head *);
-extern void inotify_inode_is_dead(struct inode *);
-extern u32 inotify_get_cookie(void);
-
-/* Kernel Consumer API */
-extern struct inotify_handle *inotify_init(const struct inotify_operations *);
-extern void inotify_init_watch(struct inotify_watch *);
-extern void inotify_destroy(struct inotify_handle *);
-extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *,
-				struct inotify_watch **);
-extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *,
-				       u32);
-extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *,
-			       struct inode *, __u32);
-extern __s32 inotify_clone_watch(struct inotify_watch *, struct inotify_watch *);
-extern void inotify_evict_watch(struct inotify_watch *);
-extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *);
-extern int inotify_rm_wd(struct inotify_handle *, __u32);
-extern void inotify_remove_watch_locked(struct inotify_handle *,
-					struct inotify_watch *);
-extern void get_inotify_watch(struct inotify_watch *);
-extern void put_inotify_watch(struct inotify_watch *);
-extern int pin_inotify_watch(struct inotify_watch *);
-extern void unpin_inotify_watch(struct inotify_watch *);
-
-#else
-
-static inline void inotify_d_instantiate(struct dentry *dentry,
-					 struct inode *inode)
-{
-}
-
-static inline void inotify_d_move(struct dentry *dentry)
-{
-}
-
-static inline void inotify_inode_queue_event(struct inode *inode,
-					     __u32 mask, __u32 cookie,
-					     const char *filename,
-					     struct inode *n_inode)
-{
-}
-
-static inline void inotify_dentry_parent_queue_event(struct dentry *dentry,
-						     __u32 mask, __u32 cookie,
-						     const char *filename)
-{
-}
-
-static inline void inotify_unmount_inodes(struct list_head *list)
-{
-}
-
-static inline void inotify_inode_is_dead(struct inode *inode)
-{
-}
-
-static inline u32 inotify_get_cookie(void)
-{
-	return 0;
-}
-
-static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops)
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void inotify_init_watch(struct inotify_watch *watch)
-{
-}
-
-static inline void inotify_destroy(struct inotify_handle *ih)
-{
-}
-
-static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
-				       struct inotify_watch **watchp)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline __s32 inotify_find_update_watch(struct inotify_handle *ih,
-					      struct inode *inode, u32 mask)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline __s32 inotify_add_watch(struct inotify_handle *ih,
-				      struct inotify_watch *watch,
-				      struct inode *inode, __u32 mask)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int inotify_rm_watch(struct inotify_handle *ih,
-				   struct inotify_watch *watch)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void inotify_remove_watch_locked(struct inotify_handle *ih,
-					       struct inotify_watch *watch)
-{
-}
-
-static inline void get_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-static inline void put_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-extern inline int pin_inotify_watch(struct inotify_watch *watch)
-{
-	return 0;
-}
-
-extern inline void unpin_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-#endif	/* CONFIG_INOTIFY */
-
-#endif	/* __KERNEL __ */
-
 #endif	/* _LINUX_INOTIFY_H */

kernel/auditsc.c

@@ -65,7 +65,6 @@
 #include <linux/binfmts.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
-#include <linux/inotify.h>
 #include <linux/capability.h>
 #include <linux/fs_struct.h>