Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify
* 'for-linus' of git://git.infradead.org/users/eparis/notify: (132 commits)
  fanotify: use both marks when possible
  fsnotify: pass both the vfsmount mark and inode mark
  fsnotify: walk the inode and vfsmount lists simultaneously
  fsnotify: rework ignored mark flushing
  fsnotify: remove global fsnotify groups lists
  fsnotify: remove group->mask
  fsnotify: remove the global masks
  fsnotify: cleanup should_send_event
  fanotify: use the mark in handler functions
  audit: use the mark in handler functions
  dnotify: use the mark in handler functions
  inotify: use the mark in handler functions
  fsnotify: send fsnotify_mark to groups in event handling functions
  fsnotify: Exchange list heads instead of moving elements
  fsnotify: srcu to protect read side of inode and vfsmount locks
  fsnotify: use an explicit flag to indicate fsnotify_destroy_mark has been called
  fsnotify: use _rcu functions for mark list traversal
  fsnotify: place marks on object in order of group memory address
  vfs/fsnotify: fsnotify_close can delay the final work in fput
  fsnotify: store struct file not struct path
  ...

Fix up trivial delete/modify conflict in fs/notify/inotify/inotify.c.
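The diff below converts the audit subsystem from its private inotify handles to fsnotify groups and marks. As a reading aid, here is a minimal sketch of the registration pattern the converted code relies on, written against the fsnotify calls exactly as they appear in this diff (the 2.6.36-era API: one-argument fsnotify_alloc_group(), fsnotify_init_mark() with a free callback, and the five-argument fsnotify_add_mark()). Everything prefixed my_ is an illustrative name invented for the sketch, not code from the tree, and later kernels changed several of these signatures.

/* Sketch only: mirrors the fsnotify usage visible in the diff below.
 * The my_* identifiers are invented for illustration. */
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct my_mark_holder {
        struct fsnotify_mark mark;      /* embedded, one per watched inode */
};

static struct fsnotify_group *my_group;

/* called once the last reference to the mark is dropped */
static void my_free_mark(struct fsnotify_mark *mark)
{
        kfree(container_of(mark, struct my_mark_holder, mark));
}

static bool my_should_send_event(struct fsnotify_group *group, struct inode *inode,
                                 struct fsnotify_mark *inode_mark,
                                 struct fsnotify_mark *vfsmount_mark,
                                 __u32 mask, void *data, int data_type)
{
        return false;   /* this sketch never queues an event */
}

static int my_handle_event(struct fsnotify_group *group,
                           struct fsnotify_mark *inode_mark,
                           struct fsnotify_mark *vfsmount_mark,
                           struct fsnotify_event *event)
{
        return 0;       /* react to the event here */
}

static const struct fsnotify_ops my_ops = {
        .should_send_event      = my_should_send_event,
        .handle_event           = my_handle_event,
        .free_group_priv        = NULL,
        .freeing_mark           = NULL,
        .free_event_priv        = NULL,
};

/* attach a mark to one inode: mark, group, inode, vfsmount (NULL), allow_dups = 0 */
static int my_watch_inode(struct inode *inode)
{
        struct my_mark_holder *h = kzalloc(sizeof(*h), GFP_KERNEL);
        int ret;

        if (!h)
                return -ENOMEM;
        fsnotify_init_mark(&h->mark, my_free_mark);
        ret = fsnotify_add_mark(&h->mark, my_group, inode, NULL, 0);
        if (ret)
                kfree(h);       /* the mark was never installed */
        return ret;
}

static int __init my_init(void)
{
        my_group = fsnotify_alloc_group(&my_ops);
        return IS_ERR(my_group) ? PTR_ERR(my_group) : 0;
}

In the conversion itself, audit_tree_init() and audit_watch_init() play the role of my_init(), and the mark is embedded in struct audit_chunk and struct audit_parent rather than in a separate holder.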
@@ -70,10 +70,11 @@ obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_SMP) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
@@ -56,7 +56,6 @@
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/inotify.h>
#include <linux/freezer.h>
#include <linux/tty.h>
@@ -103,21 +103,27 @@ extern struct mutex audit_filter_mutex;
extern void audit_free_rule_rcu(struct rcu_head *);
extern struct list_head audit_filter_list[];

extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);

/* audit watch functions */
extern unsigned long audit_watch_inode(struct audit_watch *watch);
extern dev_t audit_watch_dev(struct audit_watch *watch);
#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
extern void audit_get_watch(struct audit_watch *watch);
extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
extern int audit_add_watch(struct audit_krule *krule);
extern void audit_remove_watch(struct audit_watch *watch);
extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list);
extern void audit_inotify_unregister(struct list_head *in_list);
extern int audit_add_watch(struct audit_krule *krule, struct list_head **list);
extern void audit_remove_watch_rule(struct audit_krule *krule);
extern char *audit_watch_path(struct audit_watch *watch);
extern struct list_head *audit_watch_rules(struct audit_watch *watch);
extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev);
#else
#define audit_put_watch(w) {}
#define audit_get_watch(w) {}
#define audit_to_watch(k, p, l, o) (-EINVAL)
#define audit_add_watch(k, l) (-EINVAL)
#define audit_remove_watch_rule(k) BUG()
#define audit_watch_path(w) ""
#define audit_watch_compare(w, i, d) 0

extern struct audit_entry *audit_dupe_rule(struct audit_krule *old,
                                           struct audit_watch *watch);
#endif /* CONFIG_AUDIT_WATCH */

#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
@@ -1,5 +1,5 @@
#include "audit.h"
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
@@ -22,7 +22,7 @@ struct audit_tree {

struct audit_chunk {
        struct list_head hash;
        struct inotify_watch watch;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
@@ -59,7 +59,7 @@ static LIST_HEAD(prune_list);
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
@@ -68,7 +68,7 @@ static LIST_HEAD(prune_list);
 * that makes a difference.  Some.
 */

static struct inotify_handle *rtree_ih;
static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
@@ -111,29 +111,6 @@ const char *audit_tree_path(struct audit_tree *tree)
        return tree->pathname;
}

static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        inotify_init_watch(&chunk->watch);
        return chunk;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;
@@ -157,6 +134,35 @@ static void __put_chunk(struct rcu_head *rcu)
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
@@ -167,10 +173,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock is held by caller */
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list = chunk_hash(chunk->watch.inode);
        struct fsnotify_mark *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->i.inode)
                return;
        list = chunk_hash(entry->i.inode);
        list_add_rcu(&chunk->hash, list);
}

@@ -181,7 +192,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                if (p->watch.inode == inode) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.i.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
@@ -210,38 +222,19 @@ static struct audit_chunk *find_chunk(struct node *p)
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        if (!pin_inotify_watch(&chunk->watch)) {
                /*
                 * Filesystem is shutting down; all watches are getting
                 * evicted, just take it off the node list for this
                 * tree and let the eviction logics take care of the
                 * rest.
                 */
                owner = p->owner;
                if (owner->root == chunk) {
                        list_del_init(&owner->same_root);
                        owner->root = NULL;
                }
                list_del_init(&p->list);
                p->owner = NULL;
                put_tree(owner);
                return;
        }
        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        /*
         * pin_inotify_watch() succeeded, so the watch won't go away
         * from under us.
         */
        mutex_lock(&chunk->watch.inode->inotify_mutex);
        if (chunk->dead) {
                mutex_unlock(&chunk->watch.inode->inotify_mutex);
        spin_lock(&entry->lock);
        if (chunk->dead || !entry->i.inode) {
                spin_unlock(&entry->lock);
                goto out;
        }

@@ -256,16 +249,17 @@ static void untag_chunk(struct node *p)
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&chunk->watch.inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                goto out;
        }

        new = alloc_chunk(size);
        if (!new)
                goto Fallback;
        if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
                free_chunk(new);
                goto Fallback;
        }
@@ -298,9 +292,9 @@ static void untag_chunk(struct node *p)
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        inotify_evict_watch(&chunk->watch);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        put_inotify_watch(&chunk->watch);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry);
        fsnotify_put_mark(entry);
        goto out;

Fallback:
@@ -314,31 +308,33 @@ Fallback:
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        spin_unlock(&entry->lock);
out:
        unpin_inotify_watch(&chunk->watch);
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                free_chunk(chunk);
                return -ENOSPC;
        }

        mutex_lock(&inode->inotify_mutex);
        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
@@ -351,30 +347,31 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&inode->inotify_mutex);
        spin_unlock(&entry->lock);
        return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct inotify_watch *watch;
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(watch, struct audit_chunk, watch);
        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        put_inotify_watch(&old->watch);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
@@ -382,25 +379,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                put_inotify_watch(&old->watch);
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        mutex_lock(&inode->inotify_mutex);
        if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&old->watch);
        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->i.inode) {
                /* old_entry is being shot, lets just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                free_chunk(chunk);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&old->watch);
                put_inotify_watch(&chunk->watch);
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_destroy_mark(chunk_entry);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
@@ -426,10 +442,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        inotify_evict_watch(&old->watch);
        mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
        put_inotify_watch(&old->watch); /* and kill it */
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry);
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
        fsnotify_put_mark(old_entry); /* and kill it */
        return 0;
}

@@ -584,7 +601,9 @@ void audit_trim_trees(void)

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct inode *inode = find_chunk(node)->watch.inode;
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dieing else where... */
                        struct inode *inode = chunk->mark.i.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
@@ -846,7 +865,6 @@ void audit_kill_trees(struct list_head *list)
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
@@ -885,35 +903,46 @@ static void evict_chunk(struct audit_chunk *chunk)
        mutex_unlock(&audit_filter_mutex);
}

static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
                         u32 cookie, const char *dname, struct inode *inode)
static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmonut_mark,
                                   struct fsnotify_event *event)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

        if (mask & IN_IGNORED) {
                evict_chunk(chunk);
                put_inotify_watch(watch);
        }
        BUG();
        return -EOPNOTSUPP;
}

static void destroy_watch(struct inotify_watch *watch)
static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
        call_rcu(&chunk->head, __put_chunk);
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);
        fsnotify_put_mark(entry);
}

static const struct inotify_operations rtree_inotify_ops = {
        .handle_event = handle_event,
        .destroy_watch = destroy_watch,
static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
                                  struct fsnotify_mark *inode_mark,
                                  struct fsnotify_mark *vfsmount_mark,
                                  __u32 mask, void *data, int data_type)
{
        return false;
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .should_send_event = audit_tree_send_event,
        .free_group_priv = NULL,
        .free_event_priv = NULL,
        .freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        rtree_ih = inotify_init(&rtree_inotify_ops);
        if (IS_ERR(rtree_ih))
                audit_panic("cannot initialize inotify handle for rectree watches");
        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);
@@ -24,18 +24,18 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/inotify.h>
#include <linux/security.h>
#include "audit.h"

/*
 * Reference counting:
 *
 * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
 * audit_parent: lifetime is from audit_init_parent() to receipt of an FS_IGNORED
 *      event.  Each audit_watch holds a reference to its associated parent.
 *
 * audit_watch: if added to lists, lifetime is from audit_init_watch() to
@@ -51,40 +51,61 @@ struct audit_watch {
        unsigned long ino;              /* associated inode number */
        struct audit_parent *parent;    /* associated parent */
        struct list_head wlist;         /* entry in parent->watches list */
        struct list_head rules;         /* associated rules */
        struct list_head rules;         /* anchor for krule->rlist */
};

struct audit_parent {
        struct list_head ilist;         /* entry in inotify registration list */
        struct list_head watches;       /* associated watches */
        struct inotify_watch wdata;     /* inotify watch data */
        unsigned flags;                 /* status flags */
        struct list_head watches;       /* anchor for audit_watch->wlist */
        struct fsnotify_mark mark;      /* fsnotify mark on the inode */
};

/* Inotify handle. */
struct inotify_handle *audit_ih;
/* fsnotify handle. */
struct fsnotify_group *audit_watch_group;

/*
 * audit_parent status flags:
 *
 * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
 * a filesystem event to ensure we're adding audit watches to a valid parent.
 * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
 * receive them while we have nameidata, but must be used for IN_MOVE_SELF which
 * we can receive while holding nameidata.
 */
#define AUDIT_PARENT_INVALID 0x001
/* fsnotify events we care about. */
#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
                        FS_MOVE_SELF | FS_EVENT_ON_CHILD)

/* Inotify events we care about. */
#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
static void audit_free_parent(struct audit_parent *parent)
{
        WARN_ON(!list_empty(&parent->watches));
        kfree(parent);
}

static void audit_free_parent(struct inotify_watch *i_watch)
static void audit_watch_free_mark(struct fsnotify_mark *entry)
{
        struct audit_parent *parent;

        parent = container_of(i_watch, struct audit_parent, wdata);
        WARN_ON(!list_empty(&parent->watches));
        kfree(parent);
        parent = container_of(entry, struct audit_parent, mark);
        audit_free_parent(parent);
}

static void audit_get_parent(struct audit_parent *parent)
{
        if (likely(parent))
                fsnotify_get_mark(&parent->mark);
}

static void audit_put_parent(struct audit_parent *parent)
{
        if (likely(parent))
                fsnotify_put_mark(&parent->mark);
}

/*
 * Find and return the audit_parent on the given inode.  If found a reference
 * is taken on this parent.
 */
static inline struct audit_parent *audit_find_parent(struct inode *inode)
{
        struct audit_parent *parent = NULL;
        struct fsnotify_mark *entry;

        entry = fsnotify_find_inode_mark(audit_watch_group, inode);
        if (entry)
                parent = container_of(entry, struct audit_parent, mark);

        return parent;
}

void audit_get_watch(struct audit_watch *watch)
@@ -105,7 +126,7 @@ void audit_put_watch(struct audit_watch *watch)
void audit_remove_watch(struct audit_watch *watch)
{
        list_del(&watch->wlist);
        put_inotify_watch(&watch->parent->wdata);
        audit_put_parent(watch->parent);
        watch->parent = NULL;
        audit_put_watch(watch); /* match initial get */
}
@@ -115,42 +136,32 @@ char *audit_watch_path(struct audit_watch *watch)
        return watch->path;
}

struct list_head *audit_watch_rules(struct audit_watch *watch)
int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
{
        return &watch->rules;
}

unsigned long audit_watch_inode(struct audit_watch *watch)
{
        return watch->ino;
}

dev_t audit_watch_dev(struct audit_watch *watch)
{
        return watch->dev;
        return (watch->ino != (unsigned long)-1) &&
                (watch->ino == ino) &&
                (watch->dev == dev);
}

/* Initialize a parent watch entry. */
static struct audit_parent *audit_init_parent(struct nameidata *ndp)
{
        struct inode *inode = ndp->path.dentry->d_inode;
        struct audit_parent *parent;
        s32 wd;
        int ret;

        parent = kzalloc(sizeof(*parent), GFP_KERNEL);
        if (unlikely(!parent))
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&parent->watches);
        parent->flags = 0;

        inotify_init_watch(&parent->wdata);
        /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
        get_inotify_watch(&parent->wdata);
        wd = inotify_add_watch(audit_ih, &parent->wdata,
                               ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
        if (wd < 0) {
                audit_free_parent(&parent->wdata);
                return ERR_PTR(wd);
        fsnotify_init_mark(&parent->mark, audit_watch_free_mark);
        parent->mark.mask = AUDIT_FS_WATCH;
        ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, NULL, 0);
        if (ret < 0) {
                audit_free_parent(parent);
                return ERR_PTR(ret);
        }

        return parent;
@@ -179,7 +190,7 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
{
        struct audit_watch *watch;

        if (!audit_ih)
        if (!audit_watch_group)
                return -EOPNOTSUPP;

        if (path[0] != '/' || path[len-1] == '/' ||
@@ -217,7 +228,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old)

        new->dev = old->dev;
        new->ino = old->ino;
        get_inotify_watch(&old->parent->wdata);
        audit_get_parent(old->parent);
        new->parent = old->parent;

out:
@@ -251,15 +262,19 @@ static void audit_update_watch(struct audit_parent *parent,
        struct audit_entry *oentry, *nentry;

        mutex_lock(&audit_filter_mutex);
        /* Run all of the watches on this parent looking for the one that
         * matches the given dname */
        list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
                if (audit_compare_dname_path(dname, owatch->path, NULL))
                        continue;

                /* If the update involves invalidating rules, do the inode-based
                 * filtering now, so we don't omit records. */
                if (invalidating && current->audit_context)
                if (invalidating && !audit_dummy_context())
                        audit_filter_inodes(current, current->audit_context);

                /* updating ino will likely change which audit_hash_list we
                 * are on so we need a new watch for the new list */
                nwatch = audit_dupe_watch(owatch);
                if (IS_ERR(nwatch)) {
                        mutex_unlock(&audit_filter_mutex);
@@ -275,12 +290,21 @@ static void audit_update_watch(struct audit_parent *parent,
                        list_del(&oentry->rule.rlist);
                        list_del_rcu(&oentry->list);

                        nentry = audit_dupe_rule(&oentry->rule, nwatch);
                        nentry = audit_dupe_rule(&oentry->rule);
                        if (IS_ERR(nentry)) {
                                list_del(&oentry->rule.list);
                                audit_panic("error updating watch, removing");
                        } else {
                                int h = audit_hash_ino((u32)ino);

                                /*
                                 * nentry->rule.watch == oentry->rule.watch so
                                 * we must drop that reference and set it to our
                                 * new watch.
                                 */
                                audit_put_watch(nentry->rule.watch);
                                audit_get_watch(nwatch);
                                nentry->rule.watch = nwatch;
                                list_add(&nentry->rule.rlist, &nwatch->rules);
                                list_add_rcu(&nentry->list, &audit_inode_hash[h]);
                                list_replace(&oentry->rule.list,
@@ -312,7 +336,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
        struct audit_entry *e;

        mutex_lock(&audit_filter_mutex);
        parent->flags |= AUDIT_PARENT_INVALID;
        list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
                list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
                        e = container_of(r, struct audit_entry, rule);
@@ -325,20 +348,8 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
                audit_remove_watch(w);
        }
        mutex_unlock(&audit_filter_mutex);
}

/* Unregister inotify watches for parents on in_list.
 * Generates an IN_IGNORED event. */
void audit_inotify_unregister(struct list_head *in_list)
{
        struct audit_parent *p, *n;

        list_for_each_entry_safe(p, n, in_list, ilist) {
                list_del(&p->ilist);
                inotify_rm_watch(audit_ih, &p->wdata);
                /* the unpin matching the pin in audit_do_del_rule() */
                unpin_inotify_watch(&p->wdata);
        }
        fsnotify_destroy_mark(&parent->mark);
}

/* Get path information necessary for adding watches. */
@@ -389,7 +400,7 @@ static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
        }
}

/* Associate the given rule with an existing parent inotify_watch.
/* Associate the given rule with an existing parent.
 * Caller must hold audit_filter_mutex. */
static void audit_add_to_parent(struct audit_krule *krule,
                                struct audit_parent *parent)
@@ -397,6 +408,8 @@ static void audit_add_to_parent(struct audit_krule *krule,
        struct audit_watch *w, *watch = krule->watch;
        int watch_found = 0;

        BUG_ON(!mutex_is_locked(&audit_filter_mutex));

        list_for_each_entry(w, &parent->watches, wlist) {
                if (strcmp(watch->path, w->path))
                        continue;
@@ -413,7 +426,7 @@ static void audit_add_to_parent(struct audit_krule *krule,
        }

        if (!watch_found) {
                get_inotify_watch(&parent->wdata);
                audit_get_parent(parent);
                watch->parent = parent;

                list_add(&watch->wlist, &parent->watches);
@@ -423,13 +436,12 @@ static void audit_add_to_parent(struct audit_krule *krule,

/* Find a matching watch entry, or add this one.
 * Caller must hold audit_filter_mutex. */
int audit_add_watch(struct audit_krule *krule)
int audit_add_watch(struct audit_krule *krule, struct list_head **list)
{
        struct audit_watch *watch = krule->watch;
        struct inotify_watch *i_watch;
        struct audit_parent *parent;
        struct nameidata *ndp = NULL, *ndw = NULL;
        int ret = 0;
        int h, ret = 0;

        mutex_unlock(&audit_filter_mutex);

@@ -441,47 +453,38 @@ int audit_add_watch(struct audit_krule *krule)
                goto error;
        }

        mutex_lock(&audit_filter_mutex);

        /* update watch filter fields */
        if (ndw) {
                watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
                watch->ino = ndw->path.dentry->d_inode->i_ino;
        }

        /* The audit_filter_mutex must not be held during inotify calls because
         * we hold it during inotify event callback processing. If an existing
         * inotify watch is found, inotify_find_watch() grabs a reference before
         * returning.
         */
        if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
                               &i_watch) < 0) {
        /* either find an old parent or attach a new one */
        parent = audit_find_parent(ndp->path.dentry->d_inode);
        if (!parent) {
                parent = audit_init_parent(ndp);
                if (IS_ERR(parent)) {
                        /* caller expects mutex locked */
                        mutex_lock(&audit_filter_mutex);
                        ret = PTR_ERR(parent);
                        goto error;
                }
        } else
                parent = container_of(i_watch, struct audit_parent, wdata);
        }

        mutex_lock(&audit_filter_mutex);
        audit_add_to_parent(krule, parent);

        /* parent was moved before we took audit_filter_mutex */
        if (parent->flags & AUDIT_PARENT_INVALID)
                ret = -ENOENT;
        else
                audit_add_to_parent(krule, parent);

        /* match get in audit_init_parent or inotify_find_watch */
        put_inotify_watch(&parent->wdata);
        /* match get in audit_find_parent or audit_init_parent */
        audit_put_parent(parent);

        h = audit_hash_ino((u32)watch->ino);
        *list = &audit_inode_hash[h];
error:
        audit_put_nd(ndp, ndw);         /* NULL args OK */
        return ret;

}

void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
void audit_remove_watch_rule(struct audit_krule *krule)
{
        struct audit_watch *watch = krule->watch;
        struct audit_parent *parent = watch->parent;
@@ -492,53 +495,74 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
                audit_remove_watch(watch);

                if (list_empty(&parent->watches)) {
                        /* Put parent on the inotify un-registration
                         * list.  Grab a reference before releasing
                         * audit_filter_mutex, to be released in
                         * audit_inotify_unregister().
                         * If filesystem is going away, just leave
                         * the sucker alone, eviction will take
                         * care of it. */
                        if (pin_inotify_watch(&parent->wdata))
                                list_add(&parent->ilist, list);
                        audit_get_parent(parent);
                        fsnotify_destroy_mark(&parent->mark);
                        audit_put_parent(parent);
                }
        }
}

/* Update watch data in audit rules based on inotify events. */
static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
                                u32 cookie, const char *dname, struct inode *inode)
static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode,
                                          struct fsnotify_mark *inode_mark,
                                          struct fsnotify_mark *vfsmount_mark,
                                          __u32 mask, void *data, int data_type)
{
        struct audit_parent *parent;

        parent = container_of(i_watch, struct audit_parent, wdata);

        if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
                audit_update_watch(parent, dname, inode->i_sb->s_dev,
                                   inode->i_ino, 0);
        else if (mask & (IN_DELETE|IN_MOVED_FROM))
                audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
        /* inotify automatically removes the watch and sends IN_IGNORED */
        else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
                audit_remove_parent_watches(parent);
        /* inotify does not remove the watch, so remove it manually */
        else if(mask & IN_MOVE_SELF) {
                audit_remove_parent_watches(parent);
                inotify_remove_watch_locked(audit_ih, i_watch);
        } else if (mask & IN_IGNORED)
                put_inotify_watch(i_watch);
        return true;
}

static const struct inotify_operations audit_inotify_ops = {
        .handle_event   = audit_handle_ievent,
        .destroy_watch  = audit_free_parent,
/* Update watch data in audit rules based on fsnotify events. */
static int audit_watch_handle_event(struct fsnotify_group *group,
                                    struct fsnotify_mark *inode_mark,
                                    struct fsnotify_mark *vfsmount_mark,
                                    struct fsnotify_event *event)
{
        struct inode *inode;
        __u32 mask = event->mask;
        const char *dname = event->file_name;
        struct audit_parent *parent;

        parent = container_of(inode_mark, struct audit_parent, mark);

        BUG_ON(group != audit_watch_group);

        switch (event->data_type) {
        case (FSNOTIFY_EVENT_FILE):
                inode = event->file->f_path.dentry->d_inode;
                break;
        case (FSNOTIFY_EVENT_INODE):
                inode = event->inode;
                break;
        default:
                BUG();
                inode = NULL;
                break;
        };

        if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
                audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
        else if (mask & (FS_DELETE|FS_MOVED_FROM))
                audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
        else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
                audit_remove_parent_watches(parent);

        return 0;
}

static const struct fsnotify_ops audit_watch_fsnotify_ops = {
        .should_send_event =    audit_watch_should_send_event,
        .handle_event =         audit_watch_handle_event,
        .free_group_priv =      NULL,
        .freeing_mark =         NULL,
        .free_event_priv =      NULL,
};

static int __init audit_watch_init(void)
{
        audit_ih = inotify_init(&audit_inotify_ops);
        if (IS_ERR(audit_ih))
                audit_panic("cannot initialize inotify handle");
        audit_watch_group = fsnotify_alloc_group(&audit_watch_fsnotify_ops);
        if (IS_ERR(audit_watch_group)) {
                audit_watch_group = NULL;
                audit_panic("cannot create audit fsnotify group");
        }
        return 0;
}
subsys_initcall(audit_watch_init);
device_initcall(audit_watch_init);
@@ -71,6 +71,7 @@ static inline void audit_free_rule(struct audit_entry *e)
{
        int i;
        struct audit_krule *erule = &e->rule;

        /* some rules don't have associated watches */
        if (erule->watch)
                audit_put_watch(erule->watch);
@@ -746,8 +747,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
 * rule with the new rule in the filterlist, then free the old rule.
 * The rlist element is undefined; list manipulations are handled apart from
 * the initial copy. */
struct audit_entry *audit_dupe_rule(struct audit_krule *old,
                                    struct audit_watch *watch)
struct audit_entry *audit_dupe_rule(struct audit_krule *old)
{
        u32 fcount = old->field_count;
        struct audit_entry *entry;
@@ -769,8 +769,8 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
        new->prio = old->prio;
        new->buflen = old->buflen;
        new->inode_f = old->inode_f;
        new->watch = NULL;
        new->field_count = old->field_count;

        /*
         * note that we are OK with not refcounting here; audit_match_tree()
         * never dereferences tree and we can't get false positives there
@@ -811,9 +811,9 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
                }
        }

        if (watch) {
                audit_get_watch(watch);
                new->watch = watch;
        if (old->watch) {
                audit_get_watch(old->watch);
                new->watch = old->watch;
        }

        return entry;
@@ -866,7 +866,7 @@ static inline int audit_add_rule(struct audit_entry *entry)
        struct audit_watch *watch = entry->rule.watch;
        struct audit_tree *tree = entry->rule.tree;
        struct list_head *list;
        int h, err;
        int err;
#ifdef CONFIG_AUDITSYSCALL
        int dont_count = 0;

@@ -889,15 +889,11 @@ static inline int audit_add_rule(struct audit_entry *entry)

        if (watch) {
                /* audit_filter_mutex is dropped and re-taken during this call */
                err = audit_add_watch(&entry->rule);
                err = audit_add_watch(&entry->rule, &list);
                if (err) {
                        mutex_unlock(&audit_filter_mutex);
                        goto error;
                }
                /* entry->rule.watch may have changed during audit_add_watch() */
                watch = entry->rule.watch;
                h = audit_hash_ino((u32)audit_watch_inode(watch));
                list = &audit_inode_hash[h];
        }
        if (tree) {
                err = audit_add_tree_rule(&entry->rule);
@@ -949,7 +945,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
        struct audit_watch *watch = entry->rule.watch;
        struct audit_tree *tree = entry->rule.tree;
        struct list_head *list;
        LIST_HEAD(inotify_list);
        int ret = 0;
#ifdef CONFIG_AUDITSYSCALL
        int dont_count = 0;
@@ -969,7 +964,7 @@ static inline int audit_del_rule(struct audit_entry *entry)
        }

        if (e->rule.watch)
                audit_remove_watch_rule(&e->rule, &inotify_list);
                audit_remove_watch_rule(&e->rule);

        if (e->rule.tree)
                audit_remove_tree_rule(&e->rule);
@@ -987,9 +982,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
#endif
        mutex_unlock(&audit_filter_mutex);

        if (!list_empty(&inotify_list))
                audit_inotify_unregister(&inotify_list);

out:
        if (watch)
                audit_put_watch(watch); /* match initial get */
@@ -1323,30 +1315,23 @@ static int update_lsm_rule(struct audit_krule *r)
{
        struct audit_entry *entry = container_of(r, struct audit_entry, rule);
        struct audit_entry *nentry;
        struct audit_watch *watch;
        struct audit_tree *tree;
        int err = 0;

        if (!security_audit_rule_known(r))
                return 0;

        watch = r->watch;
        tree = r->tree;
        nentry = audit_dupe_rule(r, watch);
        nentry = audit_dupe_rule(r);
        if (IS_ERR(nentry)) {
                /* save the first error encountered for the
                 * return value */
                err = PTR_ERR(nentry);
                audit_panic("error updating LSM filters");
                if (watch)
                if (r->watch)
                        list_del(&r->rlist);
                list_del_rcu(&entry->list);
                list_del(&r->list);
        } else {
                if (watch) {
                        list_add(&nentry->rule.rlist, audit_watch_rules(watch));
                        list_del(&r->rlist);
                } else if (tree)
                if (r->watch || r->tree)
                        list_replace_init(&r->rlist, &nentry->rule.rlist);
                list_replace_rcu(&entry->list, &nentry->list);
                list_replace(&r->list, &nentry->rule.list);
@@ -65,7 +65,6 @@
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/inotify.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>

@@ -549,9 +548,8 @@ static int audit_filter_rules(struct task_struct *tsk,
                        }
                        break;
                case AUDIT_WATCH:
                        if (name && audit_watch_inode(rule->watch) != (unsigned long)-1)
                                result = (name->dev == audit_watch_dev(rule->watch) &&
                                          name->ino == audit_watch_inode(rule->watch));
                        if (name)
                                result = audit_watch_compare(rule->watch, name->ino, name->dev);
                        break;
                case AUDIT_DIR:
                        if (ctx)
@@ -1726,7 +1724,7 @@ static inline void handle_one(const struct inode *inode)
        struct audit_tree_refs *p;
        struct audit_chunk *chunk;
        int count;
        if (likely(list_empty(&inode->inotify_watches)))
        if (likely(hlist_empty(&inode->i_fsnotify_marks)))
                return;
        context = current->audit_context;
        p = context->trees;
@@ -1769,7 +1767,7 @@ retry:
        seq = read_seqbegin(&rename_lock);
        for(;;) {
                struct inode *inode = d->d_inode;
                if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
                if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) {
                        struct audit_chunk *chunk;
                        chunk = audit_tree_lookup(inode);
                        if (chunk) {
@@ -181,3 +181,7 @@ cond_syscall(sys_eventfd2);

/* performance counters: */
cond_syscall(sys_perf_event_open);

/* fanotify! */
cond_syscall(sys_fanotify_init);
cond_syscall(sys_fanotify_mark);
@@ -44,6 +44,7 @@
#include <linux/times.h>
#include <linux/limits.h>
#include <linux/dcache.h>
#include <linux/dnotify.h>
#include <linux/syscalls.h>
#include <linux/vmstat.h>
#include <linux/nfs_fs.h>
@@ -131,6 +132,9 @@ static int min_percpu_pagelist_fract = 8;

static int ngroups_max = NGROUPS_MAX;

#ifdef CONFIG_INOTIFY_USER
#include <linux/inotify.h>
#endif
#ifdef CONFIG_SPARC
#include <asm/system.h>
#endif
@@ -207,9 +211,6 @@ static struct ctl_table fs_table[];
static struct ctl_table debug_table[];
static struct ctl_table dev_table[];
extern struct ctl_table random_table[];
#ifdef CONFIG_INOTIFY_USER
extern struct ctl_table inotify_table[];
#endif
#ifdef CONFIG_EPOLL
extern struct ctl_table epoll_table[];
#endif