^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * fs/kernfs/file.c - kernfs file implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2001-3 Patrick Mochel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (c) 2007 SUSE Linux Products GmbH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/fsnotify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "kernfs-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * There's one kernfs_open_file for each open file and one kernfs_open_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * for each kernfs_node with one or more open files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * kernfs_node->attr.open points to kernfs_open_node. attr.open is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * protected by kernfs_open_node_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * filp->private_data points to seq_file whose ->private points to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * kernfs_open_file. kernfs_open_files are chained at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) static DEFINE_SPINLOCK(kernfs_open_node_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) static DEFINE_MUTEX(kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
struct kernfs_open_node {
	/* reference count; NOTE(review): acquire/release points are outside this chunk — confirm */
	atomic_t		refcnt;
	/* event counter; snapshotted into kernfs_open_file.event on each read/show */
	atomic_t		event;
	/* waitqueue for poll(2) support; presumably woken on kernfs_notify() — confirm */
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * kernfs_notify() may be called from any context and bounces notifications
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * through a work item. To minimize space overhead in kernfs_node, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * pending queue is implemented as a singly linked list of kernfs_nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * The list is terminated with the self pointer so that whether a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * kernfs_node is on the list or not can be determined by testing the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * pointer for NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) static DEFINE_SPINLOCK(kernfs_notify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) static struct kernfs_open_file *kernfs_of(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) return ((struct seq_file *)file->private_data)->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * Determine the kernfs_ops for the given kernfs_node. This function must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * be called while holding an active reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) if (kn->flags & KERNFS_LOCKDEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) lockdep_assert_held(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) return kn->attr.ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * As kernfs_seq_stop() is also called after kernfs_seq_start() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * a seq_file iteration which is fully initialized with an active reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * or an aborted kernfs_seq_start() due to get_active failure. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * position pointer is the only context for each seq_file iteration and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * thus the stop condition should be encoded in it. As the return value is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * choice to indicate get_active failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * Unfortunately, this is complicated due to the optional custom seq_file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * custom seq_file operations and thus can't decide whether put_active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * should be performed or not only on ERR_PTR(-ENODEV).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * This is worked around by factoring out the custom seq_stop() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * put_active part into kernfs_seq_stop_active(), skipping it from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * that kernfs_seq_stop_active() is skipped only after get_active failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) struct kernfs_open_file *of = sf->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) const struct kernfs_ops *ops = kernfs_ops(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (ops->seq_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) ops->seq_stop(sf, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
/*
 * seq_file ->start: grab @of->mutex and an active ref, then delegate to
 * the optional custom ->seq_start.  On get_active failure the mutex is
 * intentionally left held — seq_file always calls ->stop afterwards and
 * kernfs_seq_stop() unlocks it.
 */
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);	/* @of->mutex released by kernfs_seq_stop() */

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(). Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct kernfs_open_file *of = sf->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) const struct kernfs_ops *ops = kernfs_ops(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (ops->seq_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) void *next = ops->seq_next(sf, v, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /* see the comment above kernfs_seq_stop_active() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (next == ERR_PTR(-ENODEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) kernfs_seq_stop_active(sf, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * The same behavior and code as single_open(), always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * terminate after the initial read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) ++*ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static void kernfs_seq_stop(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct kernfs_open_file *of = sf->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (v != ERR_PTR(-ENODEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) kernfs_seq_stop_active(sf, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) mutex_unlock(&of->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static int kernfs_seq_show(struct seq_file *sf, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct kernfs_open_file *of = sf->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) of->event = atomic_read(&of->kn->attr.open->event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return of->kn->attr.ops->seq_show(sf, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
/* seq_file operations used for files with KERNFS_HAS_SEQ_SHOW set */
static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * As reading a bin file can have side-effects, the exact offset and bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * specified in read(2) call should be passed to the read callback making
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * it difficult to use seq_file. Implement simplistic custom buffering for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * bin files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) */
static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	/* a single read is capped at one page, matching the buffer below */
	ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	/*
	 * Use the preallocated buffer when one exists; it is serialized by
	 * @of->prealloc_mutex, acquired here before @of->mutex.  Otherwise
	 * fall back to a transient allocation.
	 */
	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	/* record the event count as of this read */
	of->event = atomic_read(&of->kn->attr.open->event);
	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, iocb->ki_pos);
	else
		len = -EINVAL;	/* no ->read: file is not readable this way */

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	/* copy to userspace only after dropping @of->mutex / the active ref */
	if (copy_to_iter(buf, len, iter) != len) {
		len = -EFAULT;
		goto out_free;
	}

	iocb->ki_pos += len;

out_free:
	/* unlock or free depending on which buffer was used above */
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) return seq_read_iter(iocb, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) return kernfs_file_read_iter(iocb, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * Copy data in from userland and pass it to the matching kernfs write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * There is no easy way for us to know if userspace is only doing a partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * write, so we don't support them. We expect the entire buffer to come on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * the first write. Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) * back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) */
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	ssize_t len = iov_iter_count(iter);
	const struct kernfs_ops *ops;
	char *buf;

	/*
	 * With an atomic write length configured, larger writes are
	 * rejected outright; otherwise the write is silently capped at
	 * one page (the size of the buffer allocated below).
	 */
	if (of->atomic_write_len) {
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, len, PAGE_SIZE);
	}

	/* @of->prealloc_mutex is acquired before @of->mutex, as in read */
	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len + 1, GFP_KERNEL);	/* +1 for the NUL below */
	if (!buf)
		return -ENOMEM;

	/* copy from userspace before taking @of->mutex / the active ref */
	if (copy_from_iter(buf, len, iter) != len) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, iocb->ki_pos);
	else
		len = -EINVAL;	/* no ->write: file is not writable */

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		iocb->ki_pos += len;

out_free:
	/* unlock or free depending on which buffer was used above */
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) static void kernfs_vma_open(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) struct file *file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (!of->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (of->vm_ops->open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) of->vm_ops->open(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) struct file *file = vmf->vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (!of->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) ret = VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (of->vm_ops->fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) ret = of->vm_ops->fault(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) struct file *file = vmf->vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) vm_fault_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (!of->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (of->vm_ops->page_mkwrite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) ret = of->vm_ops->page_mkwrite(vmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) file_update_time(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) void *buf, int len, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) struct file *file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) if (!of->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) if (of->vm_ops->access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) ret = of->vm_ops->access(vma, addr, buf, len, write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) static int kernfs_vma_set_policy(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) struct mempolicy *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct file *file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (!of->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (of->vm_ops->set_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) ret = of->vm_ops->set_policy(vma, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct file *file = vma->vm_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) struct mempolicy *pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (!of->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) return vma->vm_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) return vma->vm_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) pol = vma->vm_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (of->vm_ops->get_policy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) pol = of->vm_ops->get_policy(vma, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return pol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
/*
 * vm_operations installed for kernfs-backed mmaps.  Each wrapper takes an
 * active ref on the kernfs_node around the underlying of->vm_ops callback.
 */
static const struct vm_operations_struct kernfs_vm_ops = {
	.open = kernfs_vma_open,
	.fault = kernfs_vma_fault,
	.page_mkwrite = kernfs_vma_page_mkwrite,
	.access = kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy = kernfs_vma_set_policy,
	.get_policy = kernfs_vma_get_policy,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct kernfs_open_file *of = kernfs_of(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) const struct kernfs_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * mmap path and of->mutex are prone to triggering spurious lockdep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * warnings and we don't want to add spurious locking dependency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * between the two. Check whether mmap is actually implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * without grabbing @of->mutex by testing HAS_MMAP flag. See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * comment in kernfs_file_open() for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (!(of->kn->flags & KERNFS_HAS_MMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) mutex_lock(&of->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (!kernfs_get_active(of->kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) ops = kernfs_ops(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) rc = ops->mmap(of, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * to satisfy versions of X which crash if the mmap fails: that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * substitutes a new vm_file, and we don't then want bin_vm_ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (vma->vm_file != file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (of->mmapped && of->vm_ops != vma->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * It is not possible to successfully wrap close.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * So error if someone is trying to use close.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (vma->vm_ops && vma->vm_ops->close)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) of->mmapped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) of->vm_ops = vma->vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) vma->vm_ops = &kernfs_vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) kernfs_put_active(of->kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) mutex_unlock(&of->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * kernfs_get_open_node - get or create kernfs_open_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * @kn: target kernfs_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * @of: kernfs_open_file for this instance of open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) * If @kn->attr.open exists, increment its reference count; otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * create one. @of is chained to the files list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * Kernel thread context (may sleep).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) static int kernfs_get_open_node(struct kernfs_node *kn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) struct kernfs_open_file *of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) struct kernfs_open_node *on, *new_on = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) mutex_lock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) spin_lock_irq(&kernfs_open_node_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (!kn->attr.open && new_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) kn->attr.open = new_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) new_on = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) on = kn->attr.open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) atomic_inc(&on->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) list_add_tail(&of->list, &on->files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) spin_unlock_irq(&kernfs_open_node_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) mutex_unlock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) kfree(new_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) /* not there, initialize a new one and retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (!new_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) atomic_set(&new_on->refcnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) atomic_set(&new_on->event, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) init_waitqueue_head(&new_on->poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) INIT_LIST_HEAD(&new_on->files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * kernfs_put_open_node - put kernfs_open_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * @kn: target kernfs_nodet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) * @of: associated kernfs_open_file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * Put @kn->attr.open and unlink @of from the files list. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * reference count reaches zero, disassociate and free it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * LOCKING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * None.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static void kernfs_put_open_node(struct kernfs_node *kn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) struct kernfs_open_file *of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct kernfs_open_node *on = kn->attr.open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) mutex_lock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) spin_lock_irqsave(&kernfs_open_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) list_del(&of->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (atomic_dec_and_test(&on->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) kn->attr.open = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) on = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) mutex_unlock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) kfree(on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static int kernfs_fop_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) struct kernfs_node *kn = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) struct kernfs_root *root = kernfs_root(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) const struct kernfs_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) struct kernfs_open_file *of;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) bool has_read, has_write, has_mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) int error = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!kernfs_get_active(kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) ops = kernfs_ops(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) has_read = ops->seq_show || ops->read || ops->mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) has_write = ops->write || ops->mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) has_mmap = ops->mmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* see the flag definition for details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if ((file->f_mode & FMODE_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) (!(inode->i_mode & S_IWUGO) || !has_write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if ((file->f_mode & FMODE_READ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) (!(inode->i_mode & S_IRUGO) || !has_read))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /* allocate a kernfs_open_file for the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (!of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * The following is done to give a different lockdep key to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * @of->mutex for files which implement mmap. This is a rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * crude way to avoid false positive lockdep warning around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * which mm->mmap_lock nests, while holding @of->mutex. As each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * open file has a separate mutex, it's okay as long as those don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * happen on the same file. At this point, we can't easily give
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * each file a separate locking class. Let's differentiate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * whether the file has mmap or not for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) * Both paths of the branch look the same. They're supposed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * look that way and give @of->mutex different static lockdep keys.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (has_mmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) mutex_init(&of->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) mutex_init(&of->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) of->kn = kn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) of->file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * Write path needs to atomic_write_len outside active reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * Cache it in open_file. See kernfs_fop_write_iter() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) of->atomic_write_len = ops->atomic_write_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * ->seq_show is incompatible with ->prealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * as seq_read does its own allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * ->read must be used instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (ops->prealloc && ops->seq_show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (ops->prealloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) int len = of->atomic_write_len ?: PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (!of->prealloc_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) mutex_init(&of->prealloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * Always instantiate seq_file even if read access doesn't use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * seq_file or is not requested. This unifies private data access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * and readable regular files are the vast majority anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (ops->seq_show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) error = seq_open(file, &kernfs_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) error = seq_open(file, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) of->seq_file = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) of->seq_file->private = of;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /* seq_file clears PWRITE unconditionally, restore it if WRITE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (file->f_mode & FMODE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) file->f_mode |= FMODE_PWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /* make sure we have open node struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) error = kernfs_get_open_node(kn, of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) goto err_seq_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (ops->open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) /* nobody has access to @of yet, skip @of->mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) error = ops->open(of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) goto err_put_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /* open succeeded, put active references */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) kernfs_put_active(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) err_put_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) kernfs_put_open_node(kn, of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) err_seq_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) kfree(of->prealloc_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) kfree(of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) kernfs_put_active(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /* used from release/drain to ensure that ->release() is called exactly once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static void kernfs_release_file(struct kernfs_node *kn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct kernfs_open_file *of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * @of is guaranteed to have no other file operations in flight and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * we just want to synchronize release and drain paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * @kernfs_open_file_mutex is enough. @of->mutex can't be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * here because drain path may be called from places which can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * cause circular dependency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) lockdep_assert_held(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (!of->released) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * A file is never detached without being released and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * need to be able to release files which are deactivated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * and being drained. Don't use kernfs_ops().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) kn->attr.ops->release(of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) of->released = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static int kernfs_fop_release(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct kernfs_node *kn = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct kernfs_open_file *of = kernfs_of(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (kn->flags & KERNFS_HAS_RELEASE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) mutex_lock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) kernfs_release_file(kn, of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) mutex_unlock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) kernfs_put_open_node(kn, of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) seq_release(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) kfree(of->prealloc_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) kfree(of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) void kernfs_drain_open_files(struct kernfs_node *kn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct kernfs_open_node *on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct kernfs_open_file *of;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) spin_lock_irq(&kernfs_open_node_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) on = kn->attr.open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) atomic_inc(&on->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) spin_unlock_irq(&kernfs_open_node_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (!on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) mutex_lock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) list_for_each_entry(of, &on->files, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct inode *inode = file_inode(of->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (kn->flags & KERNFS_HAS_MMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) unmap_mapping_range(inode->i_mapping, 0, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (kn->flags & KERNFS_HAS_RELEASE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) kernfs_release_file(kn, of);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) mutex_unlock(&kernfs_open_file_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) kernfs_put_open_node(kn, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Kernfs attribute files are pollable. The idea is that you read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * the content and then you use 'poll' or 'select' to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * the content to change. When the content changes (assuming the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * manager for the kobject supports notification), poll will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * return EPOLLERR|EPOLLPRI, and select will return the fd whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * it is waiting for read, write, or exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * Once poll/select indicates that the value has changed, you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * need to close and re-open the file, or seek to 0 and read again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Reminder: this only works for attributes which actively support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * it, and it is not possible to test an attribute from userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * to see if it supports poll (Neither 'poll' nor 'select' return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * an appropriate error code). When in doubt, set a suitable timeout value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) __poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct kernfs_open_node *on = kn->attr.open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) poll_wait(of->file, &on->poll, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (of->event != atomic_read(&on->event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return DEFAULT_POLLMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct kernfs_open_file *of = kernfs_of(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) __poll_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!kernfs_get_active(kn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (kn->attr.ops->poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ret = kn->attr.ops->poll(of, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ret = kernfs_generic_poll(of, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) kernfs_put_active(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static void kernfs_notify_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct kernfs_node *kn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct kernfs_super_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /* pop one off the notify_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) spin_lock_irq(&kernfs_notify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) kn = kernfs_notify_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (kn == KERNFS_NOTIFY_EOL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) spin_unlock_irq(&kernfs_notify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) kernfs_notify_list = kn->attr.notify_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) kn->attr.notify_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) spin_unlock_irq(&kernfs_notify_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* kick fsnotify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) mutex_lock(&kernfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct kernfs_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct inode *p_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct qstr name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * We want fsnotify_modify() on @kn but as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * modifications aren't originating from userland don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * have the matching @file available. Look up the inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * and generate the events manually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) inode = ilookup(info->sb, kernfs_ino(kn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) parent = kernfs_get_parent(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) p_inode = ilookup(info->sb, kernfs_ino(parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (p_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) inode, FSNOTIFY_EVENT_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) p_inode, &name, inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) iput(p_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) kernfs_put(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (!p_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) fsnotify_inode(inode, FS_MODIFY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) mutex_unlock(&kernfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) kernfs_put(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * kernfs_notify - notify a kernfs file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @kn: file to notify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * Notify @kn such that poll(2) on @kn wakes up. May be called from any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void kernfs_notify(struct kernfs_node *kn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct kernfs_open_node *on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* kick poll immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) spin_lock_irqsave(&kernfs_open_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) on = kn->attr.open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) atomic_inc(&on->event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) wake_up_interruptible(&on->poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* schedule work to kick fsnotify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_lock_irqsave(&kernfs_notify_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (!kn->attr.notify_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) kernfs_get(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) kn->attr.notify_next = kernfs_notify_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) kernfs_notify_list = kn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) schedule_work(&kernfs_notify_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) spin_unlock_irqrestore(&kernfs_notify_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) EXPORT_SYMBOL_GPL(kernfs_notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) const struct file_operations kernfs_file_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .read_iter = kernfs_fop_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) .write_iter = kernfs_fop_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) .mmap = kernfs_fop_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) .open = kernfs_fop_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) .release = kernfs_fop_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .poll = kernfs_fop_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) .fsync = noop_fsync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .splice_read = generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .splice_write = iter_file_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * __kernfs_create_file - kernfs internal function to create a file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * @parent: directory to create the file in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * @name: name of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * @mode: mode of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * @uid: uid of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * @gid: gid of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * @size: size of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * @ops: kernfs operations for the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * @priv: private data for the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * @ns: optional namespace tag of the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Returns the created node on success, ERR_PTR() value on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) umode_t mode, kuid_t uid, kgid_t gid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) loff_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) const struct kernfs_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) void *priv, const void *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct lock_class_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct kernfs_node *kn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) unsigned flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) flags = KERNFS_FILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) uid, gid, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!kn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) kn->attr.ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) kn->attr.size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) kn->ns = ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) kn->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) kn->flags |= KERNFS_LOCKDEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * kn->attr.ops is accesible only while holding active ref. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * need to know whether some ops are implemented outside active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * ref. Cache their existence in flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ops->seq_show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) kn->flags |= KERNFS_HAS_SEQ_SHOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (ops->mmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) kn->flags |= KERNFS_HAS_MMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (ops->release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) kn->flags |= KERNFS_HAS_RELEASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) rc = kernfs_add_one(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) kernfs_put(kn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return kn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }