// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging; the mark is stable as long as there is a chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and confirm that the mark is alive by
 * checking FSNOTIFY_MARK_FLAG_ATTACHED, we know the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded .refs. The mark associated with the chunk
 * holds one chunk reference. This reference is dropped either when a mark is
 * going to be freed (the corresponding inode goes away) or when the chunk
 * attached to the mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure it is dropped only after an RCU grace
 * period, as it protects RCU readers of the hash table.
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

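/*
 * Allocate a tree with the watched path embedded in it; the refcount starts
 * at 1 and the caller owns that initial reference.
 */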
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

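/*
 * Drop a chunk reference such as the one taken by audit_tree_lookup(); the
 * chunk is freed once the last reference is gone.
 */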
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

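/*
 * Allocate an fsnotify mark for audit_tree_group; the chunk pointer in the
 * containing audit_tree_mark starts out NULL.
 */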
static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

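/*
 * Allocate a chunk with room for @count owner slots; ->refs starts at 1 and
 * ->key is filled in by the caller before the chunk is hashed.
 */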
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	int i;

	chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

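/*
 * Pick a hash bucket from the key (a kernel address): divide out the low
 * bits, which carry little entropy, before taking the modulus.
 */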
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

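/*
 * Recover the chunk containing owner slot @p: mask off the "will prune" bit
 * in ->index and step back to owners[0].
 */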
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

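/*
 * Re-point @mark at @chunk (and vice versa), severing the link to whatever
 * chunk the mark carried before.
 */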
static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}

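/*
 * Copy everything the old chunk tracks (key, same_root trees, live owner
 * slots) into @new and swap @new into the hash in place of @old. Callers
 * hold hash_lock and audit_tree_group->mark_mutex.
 */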
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		if (!owner) /* result of earlier fallback */
			continue;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}

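/*
 * Detach owner slot @p from its tree: clear the tree's root pointer if it
 * pointed at @chunk, unlink the node and drop the tree reference the slot
 * was holding.
 */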
static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

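/* Count how many owner slots in @chunk still point at a tree. */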
static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}

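/*
 * Finish removing a tree from @chunk after its owner slot has been cleared:
 * if no owners remain, drop the chunk and its mark altogether, otherwise
 * replace the chunk with a smaller copy.
 */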
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	mutex_lock(&audit_tree_group->mark_mutex);
	/*
	 * mark_mutex stabilizes chunk attached to the mark so we can check
	 * whether it didn't change while we've dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	mutex_unlock(&audit_tree_group->mark_mutex);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	/*
	 * Drop our initial reference. When mark we point to is getting freed,
	 * we get notification through ->freeing_mark callback and cleanup
	 * chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}


/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * Found mark is guaranteed to be attached and mark_mutex protects mark
	 * from getting detached and thus it makes sure there is chunk attached
	 * to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);

	return 0;
}

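/* Emit a CONFIG_CHANGE record noting that a tree rule is being removed. */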
static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

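/*
 * Detach and free every rule that refers to @tree, logging the removal for
 * rules that were fully set up.
 */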
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

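/*
 * iterate_mounts() callback: does this mount's root inode map to the key
 * passed in @arg?
 */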
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
		(unsigned long)arg;
}

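/*
 * Walk all trees: mark every chunk as prunable, clear the mark on chunks
 * whose inode is still the root of a mount under the tree's path, and trim
 * whatever remains marked.
 */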
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

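/*
 * Validate a new tree rule and attach a freshly allocated tree to it: watched
 * trees require an absolute path, the exit filter list and an equality
 * comparison, and cannot be combined with inode or watch filters.
 */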
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

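/*
 * iterate_mounts() callback: tag this mount's root inode with the tree
 * passed in @arg.
 */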
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

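/* Start the prune thread on first use; later calls are no-ops. */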
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static int audit_launch_prune(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (prune_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) prune_thread = kthread_run(prune_tree_thread, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) "audit_prune_tree");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (IS_ERR(prune_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) pr_err("cannot start thread audit_prune_tree");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) prune_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* called with audit_filter_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) int audit_add_tree_rule(struct audit_krule *rule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) struct audit_tree *seed = rule->tree, *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct vfsmount *mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) rule->tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) list_for_each_entry(tree, &tree_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (!strcmp(seed->pathname, tree->pathname)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) put_tree(seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) rule->tree = tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) list_add(&rule->rlist, &tree->rules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) tree = seed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) list_add(&tree->list, &tree_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) list_add(&rule->rlist, &tree->rules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* do not set rule->tree yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (unlikely(!prune_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) err = audit_launch_prune();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) goto Err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) err = kern_path(tree->pathname, 0, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) goto Err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) mnt = collect_mounts(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) path_put(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (IS_ERR(mnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) err = PTR_ERR(mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) goto Err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) get_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) err = iterate_mounts(tag_mount, tree, mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) drop_collected_mounts(mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
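	/*
	 * On success, clear the "will prune" bit on every chunk we have just
	 * attached; on failure, trim whatever got tagged and bail out.
	 */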
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) spin_lock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) list_for_each_entry(node, &tree->chunks, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) node->index &= ~(1U<<31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) spin_unlock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) trim_marked(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) goto Err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) mutex_lock(&audit_filter_mutex);
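	/*
	 * The rule may have been removed (its rlist emptied) while
	 * audit_filter_mutex was dropped above; tell the caller.
	 */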
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (list_empty(&rule->rlist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rule->tree = tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) Err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) list_del_init(&tree->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) list_del_init(&tree->rules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
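/*
 * For every tree on tree_list whose path covers @old, tag the mounts
 * collected at @new.  Returns an error from the initial path lookups or the
 * first tagging failure, if any.
 */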
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int audit_tag_tree(char *old, char *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct list_head cursor, barrier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int failed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct path path1, path2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct vfsmount *tagged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) err = kern_path(new, 0, &path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) tagged = collect_mounts(&path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) path_put(&path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (IS_ERR(tagged))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return PTR_ERR(tagged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) err = kern_path(old, 0, &path1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) drop_collected_mounts(tagged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) list_add(&barrier, &tree_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) list_add(&cursor, &barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
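	/*
	 * cursor keeps our place in tree_list across drops of
	 * audit_filter_mutex; trees that survive tagging are temporarily
	 * moved in front of barrier so the second loop below can find them
	 * and either clear their "will prune" bits or trim them on failure.
	 */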
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) while (cursor.next != &tree_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct audit_tree *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) int good_one = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) tree = container_of(cursor.next, struct audit_tree, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) get_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) list_del(&cursor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) list_add(&cursor, &tree->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) err = kern_path(tree->pathname, 0, &path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) good_one = path_is_under(&path1, &path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) path_put(&path2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!good_one) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) failed = iterate_mounts(tag_mount, tree, tagged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_lock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (!tree->goner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) list_del(&tree->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) list_add(&tree->list, &tree_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) spin_unlock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) while (barrier.prev != &tree_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct audit_tree *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) tree = container_of(barrier.prev, struct audit_tree, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) get_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) list_del(&tree->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) list_add(&tree->list, &barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_lock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) list_for_each_entry(node, &tree->chunks, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) node->index &= ~(1U<<31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) spin_unlock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) trim_marked(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) put_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) list_del(&barrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) list_del(&cursor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) path_put(&path1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) drop_collected_mounts(tagged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
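/*
 * Kick the prune thread; the trees queued on prune_list are then pruned
 * asynchronously by prune_tree_thread().
 */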
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static void audit_schedule_prune(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) wake_up_process(prune_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)  * ... and this is the variant used when evict_chunk() decides to delay the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)  * killing until the end of the syscall.  Runs synchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) void audit_kill_trees(struct audit_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct list_head *list = &context->killed_trees;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) audit_ctl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) while (!list_empty(list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct audit_tree *victim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) victim = list_entry(list->next, struct audit_tree, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) kill_rules(context, victim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) list_del_init(&victim->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) prune_one(victim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) audit_ctl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Here comes the stuff asynchronous to auditctl operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
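/*
 * Called from audit_tree_freeing_mark() when the chunk's mark is going away.
 * Detach every owning tree, mark it as a goner and either kill its rules now
 * (queueing the tree for the prune thread) or postpone that to
 * audit_kill_trees() at the end of the syscall, then unhash the chunk.
 */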
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static void evict_chunk(struct audit_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct audit_tree *owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct list_head *postponed = audit_killed_trees();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int need_prune = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) mutex_lock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) spin_lock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) while (!list_empty(&chunk->trees)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) owner = list_entry(chunk->trees.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct audit_tree, same_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) owner->goner = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) owner->root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) list_del_init(&owner->same_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) spin_unlock(&hash_lock);
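		/*
		 * With an audit context available, postpone killing the
		 * rules to audit_kill_trees() at syscall end; otherwise
		 * kill them now and hand the tree to the prune thread.
		 */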
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (!postponed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) kill_rules(audit_context(), owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) list_move(&owner->list, &prune_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) need_prune = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) list_move(&owner->list, postponed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) spin_lock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) list_del_rcu(&chunk->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) for (n = 0; n < chunk->count; n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) list_del_init(&chunk->owners[n].list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) spin_unlock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) mutex_unlock(&audit_filter_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (need_prune)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) audit_schedule_prune();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
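/*
 * The fsnotify events themselves are of no interest here; the marks only
 * serve to track chunk lifetime, so ignore everything reported.
 */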
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct inode *inode, struct inode *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) const struct qstr *file_name, u32 cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
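/*
 * fsnotify is about to destroy the mark: detach the chunk from it under
 * mark_mutex and hash_lock, then evict the chunk.
 */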
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct fsnotify_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct audit_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) mutex_lock(&mark->group->mark_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) spin_lock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) chunk = mark_chunk(mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) replace_mark_chunk(mark, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) spin_unlock(&hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) mutex_unlock(&mark->group->mark_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) evict_chunk(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) audit_mark_put_chunk(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * We are guaranteed to have at least one reference to the mark from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * either the inode or the caller of fsnotify_destroy_mark().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) BUG_ON(refcount_read(&mark->refcnt) < 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static const struct fsnotify_ops audit_tree_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) .handle_inode_event = audit_tree_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) .freeing_mark = audit_tree_freeing_mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .free_mark = audit_tree_destroy_watch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
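/*
 * Set up the mark cache, the fsnotify group used for the tree watches and
 * the chunk hash table.
 */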
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int __init audit_tree_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (IS_ERR(audit_tree_group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) audit_panic("cannot initialize fsnotify group for rectree watches");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) for (i = 0; i < HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) INIT_LIST_HEAD(&chunk_hash_heads[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) __initcall(audit_tree_init);