// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends userspace notifications about events asynchronously, some time after
 * the event happened. When inotify gets an event it needs to add that event
 * to the group's notification queue. Since a single event might need to be on
 * multiple groups' notification queues, we can't add the event directly to
 * each queue and instead add a small "event_holder" to each queue. This
 * event_holder has a pointer back to the original event. Since the majority
 * of events are going to end up on one, and only one, notification queue, we
 * embed one event_holder into each event. This means we have a single
 * allocation instead of always needing two. If the embedded event_holder is
 * already in use by another group, a new event_holder (from
 * fsnotify_event_holder_cachep) is allocated and used.
 */
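
/*
 * Illustrative sketch only (the authoritative definition lives in
 * include/linux/fsnotify_backend.h): the embedded "holder" is simply the
 * list_head inside struct fsnotify_event, which links the event into one
 * group's notification_list:
 *
 *	struct fsnotify_event {
 *		struct list_head list;
 *	};
 *
 * Because list_del_init() is used everywhere below, list_empty(&event->list)
 * doubles as a cheap "is this event queued?" check.
 */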

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
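
/*
 * Usage sketch (simplified from fsnotify_move() in include/linux/fsnotify.h;
 * arguments elided): both halves of a rename carry the same cookie so that
 * userspace can pair them up:
 *
 *	u32 fs_cookie = fsnotify_get_cookie();
 *
 *	fsnotify_name(..., FS_MOVED_FROM, ..., fs_cookie);
 *	fsnotify_name(..., FS_MOVED_TO, ..., fs_cookie);
 *
 * inotify exposes this value as the cookie field of struct inotify_event.
 */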

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);
	return list_empty(&group->notification_list);
}

void fsnotify_destroy_event(struct fsnotify_group *group,
			    struct fsnotify_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event == group->overflow_event)
		return;
	/*
	 * If the event is still queued, we have a problem... Do an unreliable
	 * lockless check first to avoid locking in the common case. The
	 * locking may be necessary for permission events which got removed
	 * from the list by a different CPU than the one freeing the event.
	 */
	if (!list_empty(&event->list)) {
		spin_lock(&group->notification_lock);
		WARN_ON(!list_empty(&event->list));
		spin_unlock(&group->notification_lock);
	}
	group->ops->free_event(event);
}

/*
 * Add an event to the group's notification queue. The group can later pull
 * this event off the queue to deal with. The function returns 0 if the event
 * was added to the queue, 1 if the event was merged with some other queued
 * event, 2 if the event was not queued - either the queue of events has
 * overflowed or the group is shutting down.
 */
int fsnotify_add_event(struct fsnotify_group *group,
		       struct fsnotify_event *event,
		       int (*merge)(struct list_head *,
				    struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	spin_lock(&group->notification_lock);

	if (group->shutdown) {
		spin_unlock(&group->notification_lock);
		return 2;
	}

	if (event == group->overflow_event ||
	    group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
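
/*
 * Merge-callback sketch, loosely modeled on how inotify coalesces duplicate
 * events (event_compare() is a hypothetical stand-in for the backend's
 * comparison; this is not verbatim kernel code):
 *
 *	static int example_merge(struct list_head *list,
 *				 struct fsnotify_event *event)
 *	{
 *		struct fsnotify_event *last;
 *
 *		last = list_entry(list->prev, struct fsnotify_event, list);
 *		if (event_compare(last, event))
 *			return 1;
 *		return 0;
 *	}
 *
 * A nonzero return tells fsnotify_add_event() that the new event was absorbed
 * by an already-queued one, so it takes no queue slot of its own.
 */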

void fsnotify_remove_queued_event(struct fsnotify_group *group,
				  struct fsnotify_event *event)
{
	assert_spin_locked(&group->notification_lock);
	/*
	 * We need to init the list head for the case of an overflow event so
	 * that the list_empty() check in fsnotify_add_event() works.
	 */
	list_del_init(&event->list);
	group->q_len--;
}

/*
 * Remove and return the first event from the notification list. It is the
 * responsibility of the caller to destroy the obtained event.
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p\n", __func__, group);

	event = list_first_entry(&group->notification_list,
				 struct fsnotify_event, list);
	fsnotify_remove_queued_event(group, event);
	return event;
}

/*
 * This will not remove the event, that must be done with
 * fsnotify_remove_first_event().
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
}
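
/*
 * Peek/remove usage sketch, simplified from the way inotify sizes a read()
 * (event_size() is a hypothetical stand-in for the backend's length
 * calculation):
 *
 *	spin_lock(&group->notification_lock);
 *	if (fsnotify_notify_queue_is_empty(group)) {
 *		spin_unlock(&group->notification_lock);
 *		return NULL;
 *	}
 *	event = fsnotify_peek_first_event(group);
 *	if (event_size(event) > count) {
 *		spin_unlock(&group->notification_lock);
 *		return ERR_PTR(-EINVAL);
 *	}
 *	fsnotify_remove_queued_event(group, event);
 *	spin_unlock(&group->notification_lock);
 *
 * The lock is held across peek and remove so the queue head cannot change
 * between the two calls.
 */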

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}