// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/inode.c - kernfs inode implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/security.h>

#include "kernfs-internal.h"

static const struct address_space_operations kernfs_aops = {
        .readpage = simple_readpage,
        .write_begin = simple_write_begin,
        .write_end = simple_write_end,
};

static const struct inode_operations kernfs_iops = {
        .permission = kernfs_iop_permission,
        .setattr = kernfs_iop_setattr,
        .getattr = kernfs_iop_getattr,
        .listxattr = kernfs_iop_listxattr,
};

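/*
 * Return the iattrs of @kn, lazily allocating them when @alloc is set.
 * A static mutex serializes concurrent allocation attempts.  Returns
 * NULL if no iattrs exist and @alloc is zero, or if allocation fails.
 */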
static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc)
{
        static DEFINE_MUTEX(iattr_mutex);
        struct kernfs_iattrs *ret;

        mutex_lock(&iattr_mutex);

        if (kn->iattr || !alloc)
                goto out_unlock;

        kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL);
        if (!kn->iattr)
                goto out_unlock;

        /* assign default attributes */
        kn->iattr->ia_uid = GLOBAL_ROOT_UID;
        kn->iattr->ia_gid = GLOBAL_ROOT_GID;

        ktime_get_real_ts64(&kn->iattr->ia_atime);
        kn->iattr->ia_mtime = kn->iattr->ia_atime;
        kn->iattr->ia_ctime = kn->iattr->ia_atime;

        simple_xattrs_init(&kn->iattr->xattrs);
        atomic_set(&kn->iattr->nr_user_xattrs, 0);
        atomic_set(&kn->iattr->user_xattr_size, 0);
out_unlock:
        ret = kn->iattr;
        mutex_unlock(&iattr_mutex);
        return ret;
}

static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
{
        return __kernfs_iattrs(kn, 1);
}

static struct kernfs_iattrs *kernfs_iattrs_noalloc(struct kernfs_node *kn)
{
        return __kernfs_iattrs(kn, 0);
}

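/*
 * Apply the requested fields of @iattr to @kn's persistent attributes.
 * The mode is kept in kn->mode itself; everything else lives in the
 * lazily allocated kernfs_iattrs.  Callers are expected to hold
 * kernfs_mutex (see kernfs_setattr() and kernfs_iop_setattr()).
 */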
int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
        struct kernfs_iattrs *attrs;
        unsigned int ia_valid = iattr->ia_valid;

        attrs = kernfs_iattrs(kn);
        if (!attrs)
                return -ENOMEM;

        if (ia_valid & ATTR_UID)
                attrs->ia_uid = iattr->ia_uid;
        if (ia_valid & ATTR_GID)
                attrs->ia_gid = iattr->ia_gid;
        if (ia_valid & ATTR_ATIME)
                attrs->ia_atime = iattr->ia_atime;
        if (ia_valid & ATTR_MTIME)
                attrs->ia_mtime = iattr->ia_mtime;
        if (ia_valid & ATTR_CTIME)
                attrs->ia_ctime = iattr->ia_ctime;
        if (ia_valid & ATTR_MODE)
                kn->mode = iattr->ia_mode;
        return 0;
}

/**
 * kernfs_setattr - set iattr on a node
 * @kn: target node
 * @iattr: iattr to set
 *
 * Returns 0 on success, -errno on failure.
 */
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
        int ret;

        mutex_lock(&kernfs_mutex);
        ret = __kernfs_setattr(kn, iattr);
        mutex_unlock(&kernfs_mutex);
        return ret;
}
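/*
 * Illustrative use of kernfs_setattr() (not part of this file): a kernfs
 * user that wants to change a node's ownership could do, for example:
 *
 *	struct iattr iattr = {
 *		.ia_valid = ATTR_UID | ATTR_GID,
 *		.ia_uid   = uid,
 *		.ia_gid   = gid,
 *	};
 *	int ret = kernfs_setattr(kn, &iattr);
 */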

int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct inode *inode = d_inode(dentry);
        struct kernfs_node *kn = inode->i_private;
        int error;

        if (!kn)
                return -EINVAL;

        mutex_lock(&kernfs_mutex);
        error = setattr_prepare(dentry, iattr);
        if (error)
                goto out;

        error = __kernfs_setattr(kn, iattr);
        if (error)
                goto out;

        /* this ignores size changes */
        setattr_copy(inode, iattr);

out:
        mutex_unlock(&kernfs_mutex);
        return error;
}

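/*
 * ->listxattr callback.  Note that this allocates the iattrs if they
 * don't exist yet so that simple_xattr_list() has something to walk.
 */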
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
{
        struct kernfs_node *kn = kernfs_dentry_node(dentry);
        struct kernfs_iattrs *attrs;

        attrs = kernfs_iattrs(kn);
        if (!attrs)
                return -ENOMEM;

        return simple_xattr_list(d_inode(dentry), &attrs->xattrs, buf, size);
}

static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
        inode->i_mode = mode;
        inode->i_atime = inode->i_mtime =
                inode->i_ctime = current_time(inode);
}

static inline void set_inode_attr(struct inode *inode,
                                  struct kernfs_iattrs *attrs)
{
        inode->i_uid = attrs->ia_uid;
        inode->i_gid = attrs->ia_gid;
        inode->i_atime = attrs->ia_atime;
        inode->i_mtime = attrs->ia_mtime;
        inode->i_ctime = attrs->ia_ctime;
}

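/*
 * Sync the VFS inode with the attributes stored in the kernfs_node.
 * For directories, nlink is derived from the subdirectory count plus
 * two (for "." and the entry in the parent).
 */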
static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
{
        struct kernfs_iattrs *attrs = kn->iattr;

        inode->i_mode = kn->mode;
        if (attrs)
                /*
                 * The kernfs_node has non-default attributes; get them
                 * from the persistent copy in the kernfs_node.
                 */
                set_inode_attr(inode, attrs);

        if (kernfs_type(kn) == KERNFS_DIR)
                set_nlink(inode, kn->dir.subdirs + 2);
}

int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
                       u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct kernfs_node *kn = inode->i_private;

        mutex_lock(&kernfs_mutex);
        kernfs_refresh_inode(kn, inode);
        mutex_unlock(&kernfs_mutex);

        generic_fillattr(inode, stat);
        return 0;
}

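/*
 * One-time initialization of a freshly allocated (I_NEW) inode.  Takes
 * a reference on @kn which is dropped in kernfs_evict_inode(), wires up
 * the per-type operations and unlocks the new inode.
 */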
static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
{
        kernfs_get(kn);
        inode->i_private = kn;
        inode->i_mapping->a_ops = &kernfs_aops;
        inode->i_op = &kernfs_iops;
        inode->i_generation = kernfs_gen(kn);

        set_default_inode_attr(inode, kn->mode);
        kernfs_refresh_inode(kn, inode);

        /* initialize inode according to type */
        switch (kernfs_type(kn)) {
        case KERNFS_DIR:
                inode->i_op = &kernfs_dir_iops;
                inode->i_fop = &kernfs_dir_fops;
                if (kn->flags & KERNFS_EMPTY_DIR)
                        make_empty_dir_inode(inode);
                break;
        case KERNFS_FILE:
                inode->i_size = kn->attr.size;
                inode->i_fop = &kernfs_file_fops;
                break;
        case KERNFS_LINK:
                inode->i_op = &kernfs_symlink_iops;
                break;
        default:
                BUG();
        }

        unlock_new_inode(inode);
}

/**
 * kernfs_get_inode - get inode for kernfs_node
 * @sb: super block
 * @kn: kernfs_node to allocate inode for
 *
 * Get the inode for @kn.  If it doesn't exist yet, a new inode is
 * allocated, initialized and unlocked before being returned.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Pointer to allocated inode on success, NULL on failure.
 */
struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn)
{
        struct inode *inode;

        inode = iget_locked(sb, kernfs_ino(kn));
        if (inode && (inode->i_state & I_NEW))
                kernfs_init_inode(kn, inode);

        return inode;
}
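/*
 * Illustrative use of kernfs_get_inode() (not part of this file):
 * superblock setup code would typically grab the root inode and turn it
 * into the root dentry, e.g.
 *
 *	inode = kernfs_get_inode(sb, root_kn);
 *	if (!inode)
 *		return -ENOMEM;
 *	sb->s_root = d_make_root(inode);
 */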

/*
 * The kernfs_node serves as both an inode and a directory entry for
 * kernfs.  To prevent the kernfs inode numbers from being freed
 * prematurely, we take a reference to kernfs_node from the kernfs inode.
 * A super_operations.evict_inode() implementation is needed to drop that
 * reference upon inode destruction.
 */
void kernfs_evict_inode(struct inode *inode)
{
        struct kernfs_node *kn = inode->i_private;

        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        kernfs_put(kn);
}

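/*
 * ->permission callback.  RCU-walk (MAY_NOT_BLOCK) is refused because
 * the inode has to be refreshed under kernfs_mutex first; the actual
 * check is then left to generic_permission().
 */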
int kernfs_iop_permission(struct inode *inode, int mask)
{
        struct kernfs_node *kn;

        if (mask & MAY_NOT_BLOCK)
                return -ECHILD;

        kn = inode->i_private;

        mutex_lock(&kernfs_mutex);
        kernfs_refresh_inode(kn, inode);
        mutex_unlock(&kernfs_mutex);

        return generic_permission(inode, mask);
}

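/*
 * Look up an xattr on @kn without allocating iattrs; a node that never
 * had attributes set simply has no xattrs, hence -ENODATA.
 */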
int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
                     void *value, size_t size)
{
        struct kernfs_iattrs *attrs = kernfs_iattrs_noalloc(kn);

        if (!attrs)
                return -ENODATA;

        return simple_xattr_get(&attrs->xattrs, name, value, size);
}

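/*
 * Set an xattr on @kn, allocating the iattrs on demand.  Fails with
 * -ENOMEM if the iattrs can't be allocated.
 */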
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
                     const void *value, size_t size, int flags)
{
        struct kernfs_iattrs *attrs = kernfs_iattrs(kn);

        if (!attrs)
                return -ENOMEM;

        return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL);
}

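/*
 * ->get/->set callbacks shared by the trusted.* and security.* xattr
 * handlers below.  xattr_full_name() reconstructs the full attribute
 * name from the handler prefix and the suffix passed in by the VFS.
 */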
static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
                                struct dentry *unused, struct inode *inode,
                                const char *suffix, void *value, size_t size)
{
        const char *name = xattr_full_name(handler, suffix);
        struct kernfs_node *kn = inode->i_private;

        return kernfs_xattr_get(kn, name, value, size);
}

static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
                                struct dentry *unused, struct inode *inode,
                                const char *suffix, const void *value,
                                size_t size, int flags)
{
        const char *name = xattr_full_name(handler, suffix);
        struct kernfs_node *kn = inode->i_private;

        return kernfs_xattr_set(kn, name, value, size, flags);
}

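/*
 * Add or replace a user.* xattr while enforcing the per-node limits
 * KERNFS_MAX_USER_XATTRS and KERNFS_USER_XATTR_SIZE_LIMIT.  The count
 * and size are charged up front and rolled back if a limit is exceeded,
 * if simple_xattr_set() fails, or if an existing attribute was replaced
 * rather than added.
 */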
static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
                                     const char *full_name,
                                     struct simple_xattrs *xattrs,
                                     const void *value, size_t size, int flags)
{
        atomic_t *sz = &kn->iattr->user_xattr_size;
        atomic_t *nr = &kn->iattr->nr_user_xattrs;
        ssize_t removed_size;
        int ret;

        if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
                ret = -ENOSPC;
                goto dec_count_out;
        }

        if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) {
                ret = -ENOSPC;
                goto dec_size_out;
        }

        ret = simple_xattr_set(xattrs, full_name, value, size, flags,
                               &removed_size);

        if (!ret && removed_size >= 0)
                size = removed_size;
        else if (!ret)
                return 0;
dec_size_out:
        atomic_sub(size, sz);
dec_count_out:
        atomic_dec(nr);
        return ret;
}

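/*
 * Remove a user.* xattr and uncharge its size and count if an entry
 * was actually removed (removed_size >= 0).
 */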
static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
                                    const char *full_name,
                                    struct simple_xattrs *xattrs,
                                    const void *value, size_t size, int flags)
{
        atomic_t *sz = &kn->iattr->user_xattr_size;
        atomic_t *nr = &kn->iattr->nr_user_xattrs;
        ssize_t removed_size;
        int ret;

        ret = simple_xattr_set(xattrs, full_name, value, size, flags,
                               &removed_size);

        if (removed_size >= 0) {
                atomic_sub(removed_size, sz);
                atomic_dec(nr);
        }

        return ret;
}

static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
                                     struct dentry *unused, struct inode *inode,
                                     const char *suffix, const void *value,
                                     size_t size, int flags)
{
        const char *full_name = xattr_full_name(handler, suffix);
        struct kernfs_node *kn = inode->i_private;
        struct kernfs_iattrs *attrs;

        if (!(kernfs_root(kn)->flags & KERNFS_ROOT_SUPPORT_USER_XATTR))
                return -EOPNOTSUPP;

        attrs = kernfs_iattrs(kn);
        if (!attrs)
                return -ENOMEM;

        if (value)
                return kernfs_vfs_user_xattr_add(kn, full_name, &attrs->xattrs,
                                                 value, size, flags);
        else
                return kernfs_vfs_user_xattr_rm(kn, full_name, &attrs->xattrs,
                                                value, size, flags);
}

static const struct xattr_handler kernfs_trusted_xattr_handler = {
        .prefix = XATTR_TRUSTED_PREFIX,
        .get = kernfs_vfs_xattr_get,
        .set = kernfs_vfs_xattr_set,
};

static const struct xattr_handler kernfs_security_xattr_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .get = kernfs_vfs_xattr_get,
        .set = kernfs_vfs_xattr_set,
};

static const struct xattr_handler kernfs_user_xattr_handler = {
        .prefix = XATTR_USER_PREFIX,
        .get = kernfs_vfs_xattr_get,
        .set = kernfs_vfs_user_xattr_set,
};

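/*
 * NULL-terminated table of xattr handlers supported on kernfs inodes:
 * trusted.* and security.* are always available, user.* only on
 * hierarchies created with KERNFS_ROOT_SUPPORT_USER_XATTR.
 */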
const struct xattr_handler *kernfs_xattr_handlers[] = {
        &kernfs_trusted_xattr_handler,
        &kernfs_security_xattr_handler,
        &kernfs_user_xattr_handler,
        NULL
};