audit: reimplement audit_trees using fsnotify rather than inotify
Simply switch audit_trees from using inotify to using fsnotify for its inode pinning and disappearing-act notifications.

Signed-off-by: Eric Paris <eparis@redhat.com>
Parent: 40554c3dae
Commit: 28a3a7eb3b
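At its core the patch swaps the per-chunk inotify_watch for an embedded fsnotify_mark_entry. Below is a condensed sketch (not part of the patch) of the new pin/unpin path, using only the calls and the fsnotify_add_mark() argument list that appear in this diff; struct audit_chunk, __put_chunk and audit_tree_group are the audit-tree definitions the patch itself touches, while the two helper names are purely illustrative.

/*
 * Condensed sketch of the new pin/unpin path (illustrative, not the patch):
 * the embedded fsnotify mark replaces the old inotify watch.
 */
#include <linux/fsnotify_backend.h>

/* final free once the last mark reference is gone */
static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	call_rcu(&chunk->head, __put_chunk);
}

static int chunk_pin_inode(struct audit_chunk *chunk, struct inode *inode)
{
	struct fsnotify_mark_entry *entry = &chunk->mark;

	/* was: inotify_init_watch() + inotify_add_watch(rtree_ih, ...) */
	fsnotify_init_mark(entry, audit_tree_destroy_watch);
	if (fsnotify_add_mark(entry, audit_tree_group, inode, 0))
		return -ENOSPC;
	return 0;
}

static void chunk_unpin_inode(struct audit_chunk *chunk)
{
	struct fsnotify_mark_entry *entry = &chunk->mark;

	/* was: inotify_evict_watch() + put_inotify_watch() */
	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);
}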
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -62,8 +62,9 @@
 
 /* listeners that hard code group numbers near the top */
 #define DNOTIFY_GROUP_NUM UINT_MAX
 #define AUDIT_WATCH_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
-#define INOTIFY_GROUP_NUM (AUDIT_WATCH_GROUP_NUM-1)
+#define AUDIT_TREE_GROUP_NUM (AUDIT_WATCH_GROUP_NUM-1)
+#define INOTIFY_GROUP_NUM (AUDIT_TREE_GROUP_NUM-1)
 
 struct fsnotify_group;
 struct fsnotify_event;
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -326,7 +326,7 @@ config AUDITSYSCALL
 config AUDIT_TREE
 	def_bool y
 	depends on AUDITSYSCALL
-	select INOTIFY
+	select FSNOTIFY
 
 menu "RCU Subsystem"
 
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1,5 +1,5 @@
 #include "audit.h"
-#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/kthread.h>
@@ -22,7 +22,7 @@ struct audit_tree {
 
 struct audit_chunk {
 	struct list_head hash;
-	struct inotify_watch watch;
+	struct fsnotify_mark_entry mark;
 	struct list_head trees;	/* with root here */
 	int dead;
 	int count;
@@ -59,7 +59,7 @@ static LIST_HEAD(prune_list);
  * tree is refcounted; one reference for "some rules on rules_list refer to
  * it", one for each chunk with pointer to it.
  *
- * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
  * of watch contributes 1 to .refs).
  *
  * node.index allows to get from node.list to containing chunk.
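The refcounting rule spelled out in this comment is what lets the rewritten untag_chunk() drop hash_lock: holding a reference on the embedded mark keeps the final free (the RCU-deferred __put_chunk) from running underneath it. A minimal sketch of that pin-while-unlocked pattern, built only from locks and calls visible later in this patch; the function name is illustrative.

/* illustrative only: mirrors the shape of untag_chunk() in this patch */
static void chunk_work_outside_hash_lock(struct audit_chunk *chunk)
{
	struct fsnotify_mark_entry *entry = &chunk->mark;

	fsnotify_get_mark(entry);	/* mark reference => chunk stays allocated */
	spin_unlock(&hash_lock);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		/* someone else is already tearing the chunk down */
		spin_unlock(&entry->lock);
		goto out;
	}
	/* ... rework the chunk under entry->lock ... */
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);	/* last ref ends up in audit_tree_destroy_watch() */
	spin_lock(&hash_lock);
}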
@@ -68,7 +68,7 @@ static LIST_HEAD(prune_list);
  * that makes a difference.  Some.
  */
 
-static struct inotify_handle *rtree_ih;
+static struct fsnotify_group *audit_tree_group;
 
 static struct audit_tree *alloc_tree(const char *s)
 {
@@ -111,29 +111,6 @@ const char *audit_tree_path(struct audit_tree *tree)
 	return tree->pathname;
 }
 
-static struct audit_chunk *alloc_chunk(int count)
-{
-	struct audit_chunk *chunk;
-	size_t size;
-	int i;
-
-	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
-	chunk = kzalloc(size, GFP_KERNEL);
-	if (!chunk)
-		return NULL;
-
-	INIT_LIST_HEAD(&chunk->hash);
-	INIT_LIST_HEAD(&chunk->trees);
-	chunk->count = count;
-	atomic_long_set(&chunk->refs, 1);
-	for (i = 0; i < count; i++) {
-		INIT_LIST_HEAD(&chunk->owners[i].list);
-		chunk->owners[i].index = i;
-	}
-	inotify_init_watch(&chunk->watch);
-	return chunk;
-}
-
 static void free_chunk(struct audit_chunk *chunk)
 {
 	int i;
@@ -157,6 +134,35 @@ static void __put_chunk(struct rcu_head *rcu)
 	audit_put_chunk(chunk);
 }
 
+static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry)
+{
+	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+	call_rcu(&chunk->head, __put_chunk);
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+	struct audit_chunk *chunk;
+	size_t size;
+	int i;
+
+	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+	chunk = kzalloc(size, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	INIT_LIST_HEAD(&chunk->hash);
+	INIT_LIST_HEAD(&chunk->trees);
+	chunk->count = count;
+	atomic_long_set(&chunk->refs, 1);
+	for (i = 0; i < count; i++) {
+		INIT_LIST_HEAD(&chunk->owners[i].list);
+		chunk->owners[i].index = i;
+	}
+	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+	return chunk;
+}
+
 enum {HASH_SIZE = 128};
 static struct list_head chunk_hash_heads[HASH_SIZE];
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
@@ -167,10 +173,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
 	return chunk_hash_heads + n % HASH_SIZE;
 }
 
-/* hash_lock is held by caller */
+/* hash_lock & entry->lock is held by caller */
 static void insert_hash(struct audit_chunk *chunk)
 {
-	struct list_head *list = chunk_hash(chunk->watch.inode);
+	struct fsnotify_mark_entry *entry = &chunk->mark;
+	struct list_head *list;
+
+	if (!entry->inode)
+		return;
+	list = chunk_hash(entry->inode);
 	list_add_rcu(&chunk->hash, list);
 }
 
@@ -181,7 +192,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 	struct audit_chunk *p;
 
 	list_for_each_entry_rcu(p, list, hash) {
-		if (p->watch.inode == inode) {
+		/* mark.inode may have gone NULL, but who cares? */
+		if (p->mark.inode == inode) {
 			atomic_long_inc(&p->refs);
 			return p;
 		}
@@ -210,38 +222,19 @@ static struct audit_chunk *find_chunk(struct node *p)
 static void untag_chunk(struct node *p)
 {
 	struct audit_chunk *chunk = find_chunk(p);
+	struct fsnotify_mark_entry *entry = &chunk->mark;
 	struct audit_chunk *new;
 	struct audit_tree *owner;
 	int size = chunk->count - 1;
 	int i, j;
 
-	if (!pin_inotify_watch(&chunk->watch)) {
-		/*
-		 * Filesystem is shutting down; all watches are getting
-		 * evicted, just take it off the node list for this
-		 * tree and let the eviction logics take care of the
-		 * rest.
-		 */
-		owner = p->owner;
-		if (owner->root == chunk) {
-			list_del_init(&owner->same_root);
-			owner->root = NULL;
-		}
-		list_del_init(&p->list);
-		p->owner = NULL;
-		put_tree(owner);
-		return;
-	}
+	fsnotify_get_mark(entry);
 
 	spin_unlock(&hash_lock);
 
-	/*
-	 * pin_inotify_watch() succeeded, so the watch won't go away
-	 * from under us.
-	 */
-	mutex_lock(&chunk->watch.inode->inotify_mutex);
-	if (chunk->dead) {
-		mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	spin_lock(&entry->lock);
+	if (chunk->dead || !entry->inode) {
+		spin_unlock(&entry->lock);
 		goto out;
 	}
 
@@ -256,16 +249,17 @@ static void untag_chunk(struct node *p)
 		list_del_init(&p->list);
 		list_del_rcu(&chunk->hash);
 		spin_unlock(&hash_lock);
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&chunk->watch.inode->inotify_mutex);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark_by_entry(entry);
+		fsnotify_put_mark(entry);
 		goto out;
 	}
 
 	new = alloc_chunk(size);
 	if (!new)
 		goto Fallback;
-	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+	fsnotify_duplicate_mark(&new->mark, entry);
+	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
 		free_chunk(new);
 		goto Fallback;
 	}
@@ -298,9 +292,9 @@ static void untag_chunk(struct node *p)
 	list_for_each_entry(owner, &new->trees, same_root)
 		owner->root = new;
 	spin_unlock(&hash_lock);
-	inotify_evict_watch(&chunk->watch);
-	mutex_unlock(&chunk->watch.inode->inotify_mutex);
-	put_inotify_watch(&chunk->watch);
+	spin_unlock(&entry->lock);
+	fsnotify_destroy_mark_by_entry(entry);
+	fsnotify_put_mark(entry);
 	goto out;
 
 Fallback:
@@ -314,31 +308,33 @@ Fallback:
 	p->owner = NULL;
 	put_tree(owner);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&chunk->watch.inode->inotify_mutex);
+	spin_unlock(&entry->lock);
 out:
-	unpin_inotify_watch(&chunk->watch);
+	fsnotify_put_mark(entry);
 	spin_lock(&hash_lock);
 }
 
 static int create_chunk(struct inode *inode, struct audit_tree *tree)
 {
+	struct fsnotify_mark_entry *entry;
 	struct audit_chunk *chunk = alloc_chunk(1);
 	if (!chunk)
 		return -ENOMEM;
 
-	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+	entry = &chunk->mark;
+	if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) {
 		free_chunk(chunk);
 		return -ENOSPC;
 	}
 
-	mutex_lock(&inode->inotify_mutex);
+	spin_lock(&entry->lock);
 	spin_lock(&hash_lock);
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&entry->lock);
+		fsnotify_destroy_mark_by_entry(entry);
+		fsnotify_put_mark(entry);
 		return 0;
 	}
 	chunk->owners[0].index = (1U << 31);
@@ -351,30 +347,33 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 	}
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
-	mutex_unlock(&inode->inotify_mutex);
+	spin_unlock(&entry->lock);
 	return 0;
 }
 
 /* the first tagged inode becomes root of tree */
 static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 {
-	struct inotify_watch *watch;
+	struct fsnotify_mark_entry *old_entry, *chunk_entry;
 	struct audit_tree *owner;
 	struct audit_chunk *chunk, *old;
 	struct node *p;
 	int n;
 
-	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+	spin_lock(&inode->i_lock);
+	old_entry = fsnotify_find_mark_entry(audit_tree_group, inode);
+	spin_unlock(&inode->i_lock);
+	if (!old_entry)
 		return create_chunk(inode, tree);
 
-	old = container_of(watch, struct audit_chunk, watch);
+	old = container_of(old_entry, struct audit_chunk, mark);
 
 	/* are we already there? */
 	spin_lock(&hash_lock);
 	for (n = 0; n < old->count; n++) {
 		if (old->owners[n].owner == tree) {
 			spin_unlock(&hash_lock);
-			put_inotify_watch(&old->watch);
+			fsnotify_put_mark(old_entry);
 			return 0;
 		}
 	}
@@ -382,25 +381,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	chunk = alloc_chunk(old->count + 1);
 	if (!chunk) {
-		put_inotify_watch(&old->watch);
+		fsnotify_put_mark(old_entry);
 		return -ENOMEM;
 	}
 
-	mutex_lock(&inode->inotify_mutex);
-	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&old->watch);
+	chunk_entry = &chunk->mark;
+
+	spin_lock(&old_entry->lock);
+	if (!old_entry->inode) {
+		/* old_entry is being shot, lets just lie */
+		spin_unlock(&old_entry->lock);
+		fsnotify_put_mark(old_entry);
 		free_chunk(chunk);
+		return -ENOENT;
+	}
+
+	fsnotify_duplicate_mark(chunk_entry, old_entry);
+	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
+		spin_unlock(&old_entry->lock);
+		free_chunk(chunk);
+		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
 	}
+
+	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
+	spin_lock(&chunk_entry->lock);
 	spin_lock(&hash_lock);
+
+	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
 	if (tree->goner) {
 		spin_unlock(&hash_lock);
 		chunk->dead = 1;
-		inotify_evict_watch(&chunk->watch);
-		mutex_unlock(&inode->inotify_mutex);
-		put_inotify_watch(&old->watch);
-		put_inotify_watch(&chunk->watch);
+		spin_unlock(&chunk_entry->lock);
+		spin_unlock(&old_entry->lock);
+
+		fsnotify_destroy_mark_by_entry(chunk_entry);
+
+		fsnotify_put_mark(chunk_entry);
+		fsnotify_put_mark(old_entry);
 		return 0;
 	}
 	list_replace_init(&old->trees, &chunk->trees);
@@ -426,10 +444,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		list_add(&tree->same_root, &chunk->trees);
 	}
 	spin_unlock(&hash_lock);
-	inotify_evict_watch(&old->watch);
-	mutex_unlock(&inode->inotify_mutex);
-	put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
-	put_inotify_watch(&old->watch); /* and kill it */
+	spin_unlock(&chunk_entry->lock);
+	spin_unlock(&old_entry->lock);
+	fsnotify_destroy_mark_by_entry(old_entry);
+	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+	fsnotify_put_mark(old_entry); /* and kill it */
 	return 0;
 }
 
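The comments added in the tag_chunk() hunks above document the lock nesting that replaces inode->inotify_mutex: old_entry->lock first, then chunk_entry->lock (safe to nest because the brand-new mark's lock can never already be held), then hash_lock, released in reverse order before the old mark is destroyed. A stripped-down sketch of that ordering, using only locks and calls from these hunks; the helper name and elided body are illustrative.

/* illustrative only: lock nesting distilled from tag_chunk() above */
static void tag_chunk_lock_order(struct fsnotify_mark_entry *old_entry,
				 struct fsnotify_mark_entry *chunk_entry)
{
	spin_lock(&old_entry->lock);	/* existing mark, found via fsnotify_find_mark_entry() */
	spin_lock(&chunk_entry->lock);	/* brand-new mark: never grabbed before */
	spin_lock(&hash_lock);

	/* ... re-point owners from the old chunk to the new one ... */

	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);

	fsnotify_destroy_mark_by_entry(old_entry);
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_mark_entry() */
	fsnotify_put_mark(old_entry);	/* and kill it */
}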
@@ -584,7 +603,9 @@ void audit_trim_trees(void)
 
 		spin_lock(&hash_lock);
 		list_for_each_entry(node, &tree->chunks, list) {
-			struct inode *inode = find_chunk(node)->watch.inode;
+			struct audit_chunk *chunk = find_chunk(node);
+			/* this could be NULL if the watch is dieing else where... */
+			struct inode *inode = chunk->mark.inode;
 			node->index |= 1U<<31;
 			if (iterate_mounts(compare_root, inode, root_mnt))
 				node->index &= ~(1U<<31);
@@ -846,7 +867,6 @@ void audit_kill_trees(struct list_head *list)
  * Here comes the stuff asynchronous to auditctl operations
  */
 
-/* inode->inotify_mutex is locked */
 static void evict_chunk(struct audit_chunk *chunk)
 {
 	struct audit_tree *owner;
@@ -885,35 +905,41 @@ static void evict_chunk(struct audit_chunk *chunk)
 	mutex_unlock(&audit_filter_mutex);
 }
 
-static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
-			 u32 cookie, const char *dname, struct inode *inode)
+static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
 {
-	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-
-	if (mask & IN_IGNORED) {
-		evict_chunk(chunk);
-		put_inotify_watch(watch);
-	}
+	BUG();
+	return -EOPNOTSUPP;
 }
 
-static void destroy_watch(struct inotify_watch *watch)
+static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
 {
-	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
-	call_rcu(&chunk->head, __put_chunk);
+	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+
+	evict_chunk(chunk);
+	fsnotify_put_mark(entry);
 }
 
-static const struct inotify_operations rtree_inotify_ops = {
-	.handle_event = handle_event,
-	.destroy_watch = destroy_watch,
+static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
+{
+	return 0;
+}
+
+static const struct fsnotify_ops audit_tree_ops = {
+	.handle_event = audit_tree_handle_event,
+	.should_send_event = audit_tree_send_event,
+	.free_group_priv = NULL,
+	.free_event_priv = NULL,
+	.freeing_mark = audit_tree_freeing_mark,
 };
 
 static int __init audit_tree_init(void)
 {
 	int i;
 
-	rtree_ih = inotify_init(&rtree_inotify_ops);
-	if (IS_ERR(rtree_ih))
-		audit_panic("cannot initialize inotify handle for rectree watches");
+	audit_tree_group = fsnotify_obtain_group(AUDIT_TREE_GROUP_NUM,
+						 0, &audit_tree_ops);
+	if (IS_ERR(audit_tree_group))
+		audit_panic("cannot initialize fsnotify group for rectree watches");
 
 	for (i = 0; i < HASH_SIZE; i++)
 		INIT_LIST_HEAD(&chunk_hash_heads[i]);
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1725,7 +1725,7 @@ static inline void handle_one(const struct inode *inode)
 	struct audit_tree_refs *p;
 	struct audit_chunk *chunk;
 	int count;
-	if (likely(list_empty(&inode->inotify_watches)))
+	if (likely(hlist_empty(&inode->i_fsnotify_mark_entries)))
 		return;
 	context = current->audit_context;
 	p = context->trees;
@@ -1768,7 +1768,7 @@ retry:
 	seq = read_seqbegin(&rename_lock);
 	for(;;) {
 		struct inode *inode = d->d_inode;
-		if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+		if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_mark_entries))) {
 			struct audit_chunk *chunk;
 			chunk = audit_tree_lookup(inode);
 			if (chunk) {