// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/namei.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * Some corrections by tytso.
 */

/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
 * lookup logic.
 */
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/init_task.h>
#include <linux/uaccess.h>

#include "internal.h"
#include "mount.h"

#define CREATE_TRACE_POINTS
#include <trace/events/namei.h>

/* [Feb-1997 T. Schoebel-Theuer]
 * Fundamental changes in the pathname lookup mechanisms (namei)
 * were necessary because of omirr. The reason is that omirr needs
 * to know the _real_ pathname, not the user-supplied one, in case
 * of symlinks (and also when transname replacements occur).
 *
 * The new code replaces the old recursive symlink resolution with
 * an iterative one (in case of non-nested symlink chains). It does
 * this with calls to <fs>_follow_link().
 * As a side effect, dir_namei(), _namei() and follow_link() are now
 * replaced with a single function lookup_dentry() that can handle all
 * the special cases of the former code.
 *
 * With the new dcache, the pathname is stored at each inode, at least as
 * long as the refcount of the inode is positive. As a side effect, the
 * size of the dcache depends on the inode cache and thus is dynamic.
 *
 * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
 * resolution to correspond with current state of the code.
 *
 * Note that the symlink resolution is not *completely* iterative.
 * There is still a significant amount of tail- and mid- recursion in
 * the algorithm. Also, note that <fs>_readlink() is not used in
 * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
 * may return different results than <fs>_follow_link(). Many virtual
 * filesystems (including /proc) exhibit this behavior.
 */

/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
 * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
 * and the name already exists in form of a symlink, try to create the new
 * name indicated by the symlink. The old code always complained that the
 * name already exists, due to not following the symlink even if its target
 * is nonexistent. The new semantics also affects mknod() and link() when
 * the name is a symlink pointing to a non-existent name.
 *
 * I don't know which semantics is the right one, since I have no access
 * to standards. But I found by trial that HP-UX 9.0 has the full "new"
 * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
 * "old" one. Personally, I think the new semantics is much more logical.
 * Note that "ln old new" where "new" is a symlink pointing to a non-existing
 * file does succeed in both HP-UX and SunOS, but not in Solaris
 * or in the old Linux semantics.
 */

/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
 * semantics. See the comments in "open_namei" and "do_link" below.
 *
 * [10-Sep-98 Alan Modra] Another symlink change.
 */

/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
 *	inside the path - always follow.
 *	in the last component in creation/removal/renaming - never follow.
 *	if LOOKUP_FOLLOW passed - follow.
 *	if the pathname has trailing slashes - follow.
 *	otherwise - don't follow.
 * (applied in that order).
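 *
 * For illustration (editor's examples, not from the original comment):
 *	unlink("foo") removes "foo" itself even when it is a symlink; the
 *	target is left alone (removal - trailing symlink never followed).
 *	stat("foo") follows a trailing symlink (LOOKUP_FOLLOW), while
 *	lstat("foo") does not.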
 *
 * [Jun 2000 AV] Inconsistent behaviour of open() in case flags == O_CREAT
 * restored for 2.4. This is the last surviving part of old 4.2BSD bug.
 * During 2.4 we need to fix the userland stuff depending on it -
 * hopefully we will be able to get rid of that wart in 2.5. So far only
 * XEmacs seems to be relying on it...
 */
/*
 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
 * any extra contention...
 */

/* In order to reduce some races, while at the same time doing additional
 * checking and hopefully speeding things up, we copy filenames to the
 * kernel data space before using them.
 *
 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
 * PATH_MAX includes the nul terminator --RR.
 */

#define EMBEDDED_NAME_MAX	(PATH_MAX - offsetof(struct filename, iname))
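
/*
 * Editor's note (illustration, not part of the original source): __getname()
 * hands back a PATH_MAX-sized buffer from names_cachep.  getname_flags()
 * below places the struct filename header at the start of that buffer and
 * stores short names in the trailing flexible array ->iname[], so
 * EMBEDDED_NAME_MAX is simply PATH_MAX minus the header:
 *
 *	|<------------------------ PATH_MAX ------------------------>|
 *	| struct filename header | name bytes (up to EMBEDDED_NAME_MAX) |
 *
 * Names that do not fit get a separately allocated struct filename while
 * the whole names_cache buffer is dedicated to the string (see below).
 */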

struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
	struct filename *result;
	char *kname;
	int len;

	result = audit_reusename(filename);
	if (result)
		return result;

	result = __getname();
	if (unlikely(!result))
		return ERR_PTR(-ENOMEM);

	/*
	 * First, try to embed the struct filename inside the names_cache
	 * allocation
	 */
	kname = (char *)result->iname;
	result->name = kname;

	len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
	if (unlikely(len < 0)) {
		__putname(result);
		return ERR_PTR(len);
	}

	/*
	 * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
	 * separate struct filename so we can dedicate the entire
	 * names_cache allocation for the pathname, and re-do the copy from
	 * userland.
	 */
	if (unlikely(len == EMBEDDED_NAME_MAX)) {
		const size_t size = offsetof(struct filename, iname[1]);
		kname = (char *)result;

		/*
		 * size is chosen so as to guarantee that result->iname[0] is
		 * within the same object and that kname can't be equal to
		 * result->iname, no matter what.
		 */
		result = kzalloc(size, GFP_KERNEL);
		if (unlikely(!result)) {
			__putname(kname);
			return ERR_PTR(-ENOMEM);
		}
		result->name = kname;
		len = strncpy_from_user(kname, filename, PATH_MAX);
		if (unlikely(len < 0)) {
			__putname(kname);
			kfree(result);
			return ERR_PTR(len);
		}
		if (unlikely(len == PATH_MAX)) {
			__putname(kname);
			kfree(result);
			return ERR_PTR(-ENAMETOOLONG);
		}
	}

	result->refcnt = 1;
	/* The empty path is special. */
	if (unlikely(!len)) {
		if (empty)
			*empty = 1;
		if (!(flags & LOOKUP_EMPTY)) {
			putname(result);
			return ERR_PTR(-ENOENT);
		}
	}

	result->uptr = filename;
	result->aname = NULL;
	audit_getname(result);
	return result;
}
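
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * callers pair getname() with putname() around the actual work, e.g.
 *
 *	struct filename *tmp = getname(pathname);
 *	if (IS_ERR(tmp))
 *		return PTR_ERR(tmp);
 *	error = do_something(tmp->name);	(hypothetical helper)
 *	putname(tmp);
 *
 * audit_reusename()/audit_getname() above let the audit subsystem reuse the
 * same struct filename for repeated lookups of one syscall argument.
 */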

struct filename *
getname(const char __user * filename)
{
	return getname_flags(filename, 0, NULL);
}

struct filename *
getname_kernel(const char * filename)
{
	struct filename *result;
	int len = strlen(filename) + 1;

	result = __getname();
	if (unlikely(!result))
		return ERR_PTR(-ENOMEM);

	if (len <= EMBEDDED_NAME_MAX) {
		result->name = (char *)result->iname;
	} else if (len <= PATH_MAX) {
		const size_t size = offsetof(struct filename, iname[1]);
		struct filename *tmp;

		tmp = kmalloc(size, GFP_KERNEL);
		if (unlikely(!tmp)) {
			__putname(result);
			return ERR_PTR(-ENOMEM);
		}
		tmp->name = (char *)result;
		result = tmp;
	} else {
		__putname(result);
		return ERR_PTR(-ENAMETOOLONG);
	}
	memcpy((char *)result->name, filename, len);
	result->uptr = NULL;
	result->aname = NULL;
	result->refcnt = 1;
	audit_getname(result);

	return result;
}

void putname(struct filename *name)
{
	BUG_ON(name->refcnt <= 0);

	if (--name->refcnt > 0)
		return;

	if (name->name != name->iname) {
		__putname(name->name);
		kfree(name);
	} else
		__putname(name);
}

static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl *acl;

	if (mask & MAY_NOT_BLOCK) {
		acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
		if (!acl)
			return -EAGAIN;
		/* no ->get_acl() calls in RCU mode... */
		if (is_uncached_acl(acl))
			return -ECHILD;
		return posix_acl_permission(inode, acl, mask);
	}

	acl = get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}
#endif

	return -EAGAIN;
}

/*
 * This does the basic UNIX permission checking.
 *
 * Note that the POSIX ACL check cares about the MAY_NOT_BLOCK bit,
 * for RCU walking.
 */
static int acl_permission_check(struct inode *inode, int mask)
{
	unsigned int mode = inode->i_mode;

	/* Are we the owner? If so, ACLs don't matter */
	if (likely(uid_eq(current_fsuid(), inode->i_uid))) {
		mask &= 7;
		mode >>= 6;
		return (mask & ~mode) ? -EACCES : 0;
	}

	/* Do we have ACLs? */
	if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
		int error = check_acl(inode, mask);
		if (error != -EAGAIN)
			return error;
	}

	/* Only RWX matters for group/other mode bits */
	mask &= 7;

	/*
	 * Are the group permissions different from
	 * the other permissions in the bits we care
	 * about? Need to check group ownership if so.
	 */
	if (mask & (mode ^ (mode >> 3))) {
		if (in_group_p(inode->i_gid))
			mode >>= 3;
	}
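
	/*
	 * Editor's note (worked example, not part of the original source):
	 * after "mask &= 7" the request sits in the low three bits, and
	 * (mode ^ (mode >> 3)) has a bit set only where the group and
	 * "other" permissions disagree.  E.g. with mode 0764 (group rw-,
	 * other r--) a MAY_READ request never needs in_group_p(), because
	 * both classes grant read; a MAY_WRITE request does, and only group
	 * members get the group bits shifted into place for the check below.
	 */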

	/* Bits in 'mode' clear that we require? */
	return (mask & ~mode) ? -EACCES : 0;
}

/**
 * generic_permission - check for access rights on a Posix-like filesystem
 * @inode: inode to check access rights for
 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC,
 *	  %MAY_NOT_BLOCK ...)
 *
 * Used to check for read/write/execute permissions on a file.
 * We use "fsuid" for this, letting us set arbitrary permissions
 * for filesystem access without changing the "normal" uids which
 * are used for other things.
 *
 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
 * request cannot be satisfied (eg. requires blocking or too much complexity).
 * It would then be called again in ref-walk mode.
 */
int generic_permission(struct inode *inode, int mask)
{
	int ret;

	/*
	 * Do the basic permission checks.
	 */
	ret = acl_permission_check(inode, mask);
	if (ret != -EACCES)
		return ret;

	if (S_ISDIR(inode->i_mode)) {
		/* DACs are overridable for directories */
		if (!(mask & MAY_WRITE))
			if (capable_wrt_inode_uidgid(inode,
						     CAP_DAC_READ_SEARCH))
				return 0;
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;
		return -EACCES;
	}

	/*
	 * Searching includes executable on directories, else just read.
	 */
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (mask == MAY_READ)
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
			return 0;
	/*
	 * Read/write DACs are always overridable.
	 * Executable DACs are overridable when there is
	 * at least one exec bit set.
	 */
	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;

	return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
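
/*
 * Editor's note (illustration, not part of the original source): the @mask
 * bits come from the caller's intent, e.g. may_open() passes MAY_READ,
 * MAY_WRITE or both according to the open() access mode, and directory
 * traversal uses MAY_EXEC (search).  A capability check such as
 * CAP_DAC_OVERRIDE above is consulted only after the owner/group/other and
 * POSIX ACL checks have already refused the request.
 */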

/*
 * We _really_ want to just do "generic_permission()" without
 * even looking at the inode->i_op values. So we keep a cache
 * flag in inode->i_opflags, that says "this has no special
 * permission function, use the fast case".
 */
static inline int do_inode_permission(struct inode *inode, int mask)
{
	if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
		if (likely(inode->i_op->permission))
			return inode->i_op->permission(inode, mask);

		/* This gets set once for the inode lifetime */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_FASTPERM;
		spin_unlock(&inode->i_lock);
	}
	return generic_permission(inode, mask);
}

/**
 * sb_permission - Check superblock-level permissions
 * @sb: Superblock of inode to check permission on
 * @inode: Inode to check permission on
 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Separate out file-system wide checks from inode-specific permission checks.
 */
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
	if (unlikely(mask & MAY_WRITE)) {
		umode_t mode = inode->i_mode;

		/* Nobody gets write access to a read-only fs. */
		if (sb_rdonly(sb) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			return -EROFS;
	}
	return 0;
}

/**
 * inode_permission - Check for access rights to a given inode
 * @inode: Inode to check permission on
 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Check for read/write/execute permissions on an inode. We use fs[ug]id for
 * this, letting us set arbitrary permissions for filesystem access without
 * changing the "normal" UIDs which are used for other things.
 *
 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
 */
int inode_permission(struct inode *inode, int mask)
{
	int retval;

	retval = sb_permission(inode->i_sb, inode, mask);
	if (retval)
		return retval;

	if (unlikely(mask & MAY_WRITE)) {
		/*
		 * Nobody gets write access to an immutable file.
		 */
		if (IS_IMMUTABLE(inode))
			return -EPERM;

		/*
		 * Updating mtime will likely cause i_uid and i_gid to be
		 * written back improperly if their true value is unknown
		 * to the vfs.
		 */
		if (HAS_UNMAPPED_ID(inode))
			return -EACCES;
	}

	retval = do_inode_permission(inode, mask);
	if (retval)
		return retval;

	retval = devcgroup_inode_permission(inode, mask);
	if (retval)
		return retval;

	return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);

/**
 * path_get - get a reference to a path
 * @path: path to get the reference to
 *
 * Given a path increment the reference count to the dentry and the vfsmount.
 */
void path_get(const struct path *path)
{
	mntget(path->mnt);
	dget(path->dentry);
}
EXPORT_SYMBOL(path_get);

/**
 * path_put - put a reference to a path
 * @path: path to put the reference to
 *
 * Given a path decrement the reference count to the dentry and the vfsmount.
 */
void path_put(const struct path *path)
{
	dput(path->dentry);
	mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);

#define EMBEDDED_LEVELS 2
struct nameidata {
	struct path	path;
	struct qstr	last;
	struct path	root;
	struct inode	*inode; /* path.dentry.d_inode */
	unsigned int	flags;
	unsigned	seq, m_seq, r_seq;
	int		last_type;
	unsigned	depth;
	int		total_link_count;
	struct saved {
		struct path link;
		struct delayed_call done;
		const char *name;
		unsigned seq;
	} *stack, internal[EMBEDDED_LEVELS];
	struct filename	*name;
	struct nameidata *saved;
	unsigned	root_seq;
	int		dfd;
	kuid_t		dir_uid;
	umode_t		dir_mode;
} __randomize_layout;

static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
	struct nameidata *old = current->nameidata;
	p->stack = p->internal;
	p->dfd = dfd;
	p->name = name;
	p->total_link_count = old ? old->total_link_count : 0;
	p->saved = old;
	current->nameidata = p;
}

static void restore_nameidata(void)
{
	struct nameidata *now = current->nameidata, *old = now->saved;

	current->nameidata = old;
	if (old)
		old->total_link_count = now->total_link_count;
	if (now->stack != now->internal)
		kfree(now->stack);
}

static bool nd_alloc_stack(struct nameidata *nd)
{
	struct saved *p;

	p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
			  nd->flags & LOOKUP_RCU ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(!p))
		return false;
	memcpy(p, nd->internal, sizeof(nd->internal));
	nd->stack = p;
	return true;
}

/**
 * path_connected - Verify that a dentry is below mnt.mnt_root
 * @mnt: The mountpoint to check.
 * @dentry: The dentry to check.
 *
 * Rename can sometimes move a file or directory outside of a bind
 * mount, path_connected allows those cases to be detected.
 */
static bool path_connected(struct vfsmount *mnt, struct dentry *dentry)
{
	struct super_block *sb = mnt->mnt_sb;

	/* Only bind mounts can have disconnected paths */
	if (mnt->mnt_root == sb->s_root)
		return true;

	return is_subdir(dentry, mnt->mnt_root);
}

static void drop_links(struct nameidata *nd)
{
	int i = nd->depth;
	while (i--) {
		struct saved *last = nd->stack + i;
		do_delayed_call(&last->done);
		clear_delayed_call(&last->done);
	}
}

static void terminate_walk(struct nameidata *nd)
{
	drop_links(nd);
	if (!(nd->flags & LOOKUP_RCU)) {
		int i;
		path_put(&nd->path);
		for (i = 0; i < nd->depth; i++)
			path_put(&nd->stack[i].link);
		if (nd->flags & LOOKUP_ROOT_GRABBED) {
			path_put(&nd->root);
			nd->flags &= ~LOOKUP_ROOT_GRABBED;
		}
	} else {
		nd->flags &= ~LOOKUP_RCU;
		rcu_read_unlock();
	}
	nd->depth = 0;
}

/* path_put is needed afterwards regardless of success or failure */
static bool __legitimize_path(struct path *path, unsigned seq, unsigned mseq)
{
	int res = __legitimize_mnt(path->mnt, mseq);
	if (unlikely(res)) {
		if (res > 0)
			path->mnt = NULL;
		path->dentry = NULL;
		return false;
	}
	if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) {
		path->dentry = NULL;
		return false;
	}
	return !read_seqcount_retry(&path->dentry->d_seq, seq);
}

static inline bool legitimize_path(struct nameidata *nd,
				   struct path *path, unsigned seq)
{
	return __legitimize_path(path, seq, nd->m_seq);
}

static bool legitimize_links(struct nameidata *nd)
{
	int i;
	for (i = 0; i < nd->depth; i++) {
		struct saved *last = nd->stack + i;
		if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
			drop_links(nd);
			nd->depth = i + 1;
			return false;
		}
	}
	return true;
}

static bool legitimize_root(struct nameidata *nd)
{
	/*
	 * For scoped-lookups (where nd->root has been zeroed), we need to
	 * restart the whole lookup from scratch -- because set_root() is wrong
	 * for these lookups (nd->dfd is the root, not the filesystem root).
	 */
	if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
		return false;
	/* Nothing to do if nd->root is zero or is managed by the VFS user. */
	if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
		return true;
	nd->flags |= LOOKUP_ROOT_GRABBED;
	return legitimize_path(nd, &nd->root, nd->root_seq);
}

/*
 * Path walking has 2 modes, rcu-walk and ref-walk (see
 * Documentation/filesystems/path-lookup.txt).  In situations when we can't
 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
 * normal reference counts on dentries and vfsmounts to transition to ref-walk
 * mode.  Refcounts are grabbed at the last known good point before rcu-walk
 * got stuck, so ref-walk may continue from there. If this is not successful
 * (eg. a seqcount has changed), then failure is returned and it's up to caller
 * to restart the path walk from the beginning in ref-walk mode.
 */
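
/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * usual caller pattern is to attempt the walk in rcu-walk mode first and to
 * retry it in ref-walk mode only if that fails, roughly:
 *
 *	err = path_lookupat(&nd, flags | LOOKUP_RCU, path);
 *	if (err == -ECHILD)
 *		err = path_lookupat(&nd, flags, path);
 *	if (err == -ESTALE)
 *		err = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
 *
 * (Exact call sites and signatures may differ; see filename_lookup() and
 * friends further down in this file.)
 */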

/**
 * try_to_unlazy - try to switch to ref-walk mode.
 * @nd: nameidata pathwalk data
 * Returns: true on success, false on failure
 *
 * try_to_unlazy attempts to legitimize the current nd->path and nd->root
 * for ref-walk mode.
 * Must be called from rcu-walk context.
 * Nothing should touch nameidata between try_to_unlazy() failure and
 * terminate_walk().
 */
static bool try_to_unlazy(struct nameidata *nd)
{
	struct dentry *parent = nd->path.dentry;

	BUG_ON(!(nd->flags & LOOKUP_RCU));

	nd->flags &= ~LOOKUP_RCU;
	if (unlikely(!legitimize_links(nd)))
		goto out1;
	if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
		goto out;
	if (unlikely(!legitimize_root(nd)))
		goto out;
	rcu_read_unlock();
	BUG_ON(nd->inode != parent->d_inode);
	return true;

out1:
	nd->path.mnt = NULL;
	nd->path.dentry = NULL;
out:
	rcu_read_unlock();
	return false;
}

/**
 * unlazy_child - try to switch to ref-walk mode.
 * @nd: nameidata pathwalk data
 * @dentry: child of nd->path.dentry
 * @seq: seq number to check dentry against
 * Returns: 0 on success, -ECHILD on failure
 *
 * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry
 * for ref-walk mode.  @dentry must be a path found by a do_lookup call on
 * @nd.  Must be called from rcu-walk context.
 * Nothing should touch nameidata between unlazy_child() failure and
 * terminate_walk().
 */
static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
{
	BUG_ON(!(nd->flags & LOOKUP_RCU));

	nd->flags &= ~LOOKUP_RCU;
	if (unlikely(!legitimize_links(nd)))
		goto out2;
	if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
		goto out2;
	if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
		goto out1;

	/*
	 * We need to move both the parent and the dentry from the RCU domain
	 * to be properly refcounted. And the sequence number in the dentry
	 * validates *both* dentry counters, since we checked the sequence
	 * number of the parent after we got the child sequence number. So we
	 * know the parent must still be valid if the child sequence number is
	 * valid.
	 */
	if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
		goto out;
	if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
		goto out_dput;
	/*
	 * Sequence counts matched. Now make sure that the root is
	 * still valid and get it if required.
	 */
	if (unlikely(!legitimize_root(nd)))
		goto out_dput;
	rcu_read_unlock();
	return 0;

out2:
	nd->path.mnt = NULL;
out1:
	nd->path.dentry = NULL;
out:
	rcu_read_unlock();
	return -ECHILD;
out_dput:
	rcu_read_unlock();
	dput(dentry);
	return -ECHILD;
}

static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
		return dentry->d_op->d_revalidate(dentry, flags);
	else
		return 1;
}

#define INIT_PATH_SIZE 64

static void success_walk_trace(struct nameidata *nd)
{
	struct path *pt = &nd->path;
	struct inode *i = nd->inode;
	char buf[INIT_PATH_SIZE], *try_buf;
	int cur_path_size;
	char *p;

	/* When the eBPF/tracepoint is disabled, keep overhead low. */
	if (!trace_inodepath_enabled())
		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* First try stack allocated buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) try_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) cur_path_size = INIT_PATH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) while (cur_path_size <= PATH_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* Free previous heap allocation if we are now trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * a second or later heap allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (try_buf != buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) kfree(try_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* All but the first alloc are on the heap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (cur_path_size != INIT_PATH_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) try_buf = kmalloc(cur_path_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!try_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) try_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) sprintf(try_buf, "error:buf_alloc_failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) p = d_path(pt, try_buf, cur_path_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (!IS_ERR(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) char *end = mangle_path(try_buf, p, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *end = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* On mangle errors, double path size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * till PATH_MAX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cur_path_size = cur_path_size << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (PTR_ERR(p) == -ENAMETOOLONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* If d_path complains that name is too long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * then double path size till PATH_MAX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) cur_path_size = cur_path_size << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) sprintf(try_buf, "error:d_path_failed_%lu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) -1 * PTR_ERR(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (cur_path_size > PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) sprintf(try_buf, "error:d_path_name_too_long");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) trace_inodepath(i, try_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (try_buf != buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) kfree(try_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
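/*
 * The grow-and-retry idiom in success_walk_trace() above is the same one
 * userspace needs for readlink(2), where the required buffer size is unknown
 * up front.  A minimal userspace sketch (hypothetical helper):
 *
 *	#include <limits.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static char *read_link_dup(const char *path)
 *	{
 *		size_t size = 64;
 *
 *		while (size <= PATH_MAX) {
 *			char *buf = malloc(size);
 *			ssize_t n;
 *
 *			if (!buf)
 *				return NULL;
 *			n = readlink(path, buf, size);
 *			if (n >= 0 && (size_t)n < size) {
 *				buf[n] = '\0';		// it fit
 *				return buf;
 *			}
 *			free(buf);
 *			if (n < 0)
 *				return NULL;
 *			size *= 2;			// possibly truncated: double and retry
 *		}
 *		return NULL;
 *	}
 */
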
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * complete_walk - successful completion of path walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * @nd: pointer to nameidata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * If we had been in RCU mode, drop out of it and legitimize nd->path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * Revalidate the final result, unless we'd already done that during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * the path walk or the filesystem doesn't ask for it. Return 0 on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * success, -error on failure. In case of failure caller does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * need to drop nd->path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static int complete_walk(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct dentry *dentry = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * We don't want to zero nd->root for scoped-lookups or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * externally-managed nd->root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) nd->root.mnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * While the guarantee of LOOKUP_IS_SCOPED is (roughly) "don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * ever step outside the root during lookup" and should already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * be guaranteed by the rest of namei, we want to avoid a namei
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * BUG resulting in userspace being given a path that was not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * scoped within the root at some point during the lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * So, do a final sanity-check to make sure that in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * worst-case scenario (a complete bypass of LOOKUP_IS_SCOPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * we won't silently return an fd completely outside of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * requested root to userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Userspace could move the path outside the root after this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * check, but as discussed elsewhere this is not a concern (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * resolved file was inside the root at some point).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!path_is_under(&nd->path, &nd->root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (likely(!(nd->flags & LOOKUP_JUMPED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) success_walk_trace(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) success_walk_trace(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (status > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) success_walk_trace(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) status = -ESTALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static int set_root(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct fs_struct *fs = current->fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * Jumping to the real root in a scoped-lookup is a BUG in namei, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * still have to ensure it doesn't happen because it will cause a breakout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * from the dirfd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (WARN_ON(nd->flags & LOOKUP_IS_SCOPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return -ENOTRECOVERABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) seq = read_seqcount_begin(&fs->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) nd->root = fs->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) } while (read_seqcount_retry(&fs->seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) get_fs_root(fs, &nd->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) nd->flags |= LOOKUP_ROOT_GRABBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int nd_jump_root(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (unlikely(nd->flags & LOOKUP_BENEATH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* Absolute path arguments to path_init() are allowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (nd->path.mnt != NULL && nd->path.mnt != nd->root.mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!nd->root.mnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int error = set_root(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct dentry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) nd->path = nd->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) d = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) nd->inode = d->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) nd->seq = nd->root_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) path_put(&nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) nd->path = nd->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) path_get(&nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) nd->inode = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) nd->flags |= LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
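/*
 * The -EXDEV returns above are what userspace sees through openat2(2):
 * RESOLVE_BENEATH maps to LOOKUP_BENEATH, so any attempt to jump to the root
 * (an absolute pathname, for instance) fails instead of escaping the starting
 * directory.  Minimal userspace sketch, assuming a kernel with openat2() and
 * calling it directly since libc may not provide a wrapper:
 *
 *	#include <fcntl.h>
 *	#include <linux/openat2.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long open_beneath(int dirfd, const char *path)
 *	{
 *		struct open_how how = {
 *			.flags		= O_RDONLY,
 *			.resolve	= RESOLVE_BENEATH,
 *		};
 *
 *		// e.g. path = "/etc/passwd" fails with errno == EXDEV
 *		return syscall(__NR_openat2, dirfd, path, &how, sizeof(how));
 *	}
 */
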
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Helper to directly jump to a known parsed path from ->get_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * the caller must have taken a reference to the path beforehand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int nd_jump_link(struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) int error = -ELOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct nameidata *nd = current->nameidata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (unlikely(nd->flags & LOOKUP_NO_MAGICLINKS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) error = -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (nd->path.mnt != path->mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* Not currently safe for scoped-lookups. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (unlikely(nd->flags & LOOKUP_IS_SCOPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) path_put(&nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) nd->path = *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) nd->inode = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) nd->flags |= LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) path_put(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static inline void put_link(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct saved *last = nd->stack + --nd->depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) do_delayed_call(&last->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (!(nd->flags & LOOKUP_RCU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) path_put(&last->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int sysctl_protected_symlinks __read_mostly = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int sysctl_protected_hardlinks __read_mostly = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int sysctl_protected_fifos __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int sysctl_protected_regular __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * may_follow_link - Check symlink following for unsafe situations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * @nd: nameidata pathwalk data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * In the case of the sysctl_protected_symlinks sysctl being enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * in a sticky world-writable directory. This is to protect privileged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * processes from failing races against path names that may change out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * from under them by way of other users creating malicious symlinks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * It will permit symlinks to be followed only when outside a sticky
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * world-writable directory, or when the uid of the symlink and follower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * match, or when the directory owner matches the symlink's owner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Returns 0 if following the symlink is allowed, -ve on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static inline int may_follow_link(struct nameidata *nd, const struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (!sysctl_protected_symlinks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Allowed if owner and follower match. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (uid_eq(current_cred()->fsuid, inode->i_uid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* Allowed if parent directory not sticky and world-writable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if ((nd->dir_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* Allowed if parent directory and link owner match. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (uid_valid(nd->dir_uid) && uid_eq(nd->dir_uid, inode->i_uid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (nd->flags & LOOKUP_RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) audit_inode(nd->name, nd->stack[0].link.dentry, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) audit_log_path_denied(AUDIT_ANOM_LINK, "follow_link");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
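/*
 * Userspace-visible effect of may_follow_link() above (illustrative scenario,
 * hypothetical path): with sysctl fs.protected_symlinks=1, following a symlink
 * that another user planted in a sticky, world-writable directory such as /tmp
 * is refused with EACCES.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void demo(void)
 *	{
 *		// assume /tmp/victim is a symlink created by a different user
 *		int fd = open("/tmp/victim", O_RDONLY);
 *
 *		if (fd < 0 && errno == EACCES)
 *			fprintf(stderr, "blocked by fs.protected_symlinks\n");
 *		else if (fd >= 0)
 *			close(fd);
 *	}
 */
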
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * safe_hardlink_source - Check for safe hardlink conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * @inode: the source inode to hardlink from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * Return false if at least one of the following conditions holds:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * - inode is not a regular file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * - inode is setuid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * - inode is setgid and group-exec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * - access failure for read and write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * Otherwise returns true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static bool safe_hardlink_source(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) umode_t mode = inode->i_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /* Special files should not get pinned to the filesystem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!S_ISREG(mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* Setuid files should not get pinned to the filesystem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (mode & S_ISUID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* Executable setgid files should not get pinned to the filesystem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Hardlinking to unreadable or unwritable sources is dangerous. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (inode_permission(inode, MAY_READ | MAY_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * may_linkat - Check permissions for creating a hardlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * @link: the source to hardlink from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Block hardlink when all of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * - sysctl_protected_hardlinks enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * - fsuid does not match inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * - hardlink source is unsafe (see safe_hardlink_source() above)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * - not CAP_FOWNER in a namespace with the inode owner uid mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * Returns 0 if successful, -ve on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int may_linkat(struct path *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct inode *inode = link->dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* Inode writeback is not safe when the uid or gid are invalid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (!sysctl_protected_hardlinks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* Source inode owner (or CAP_FOWNER) can hardlink all they like;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * otherwise, it must be a safe source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
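/*
 * Userspace-visible effect of may_linkat()/safe_hardlink_source() above
 * (illustrative scenario, hypothetical paths): with fs.protected_hardlinks=1,
 * an unprivileged user cannot pin another user's unreadable, unwritable or
 * setuid file by hardlinking it; link(2) fails with EPERM.  Both paths are
 * assumed to live in a directory we may write to, on a single filesystem.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void demo(void)
 *	{
 *		if (link("/srv/shared/secret", "/srv/shared/secret.ln") < 0 &&
 *		    errno == EPERM)
 *			fprintf(stderr, "blocked by fs.protected_hardlinks\n");
 *	}
 */
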
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * should be allowed, or not, on files that already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * @dir_mode: mode bits of directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * @dir_uid: owner of directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * @inode: the inode of the file to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * Block an O_CREAT open of a FIFO (or a regular file) when:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * - the file already exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * - we are in a sticky directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * - we don't own the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * - the owner of the directory doesn't own the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * - the directory is world writable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * If sysctl_protected_fifos (or sysctl_protected_regular) is set to 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * the directory doesn't have to be world writable: being group writable will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * be enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * Returns 0 if the open is allowed, -ve on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct inode * const inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) likely(!(dir_mode & S_ISVTX)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) uid_eq(inode->i_uid, dir_uid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) uid_eq(current_fsuid(), inode->i_uid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (likely(dir_mode & 0002) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) (dir_mode & 0020 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) const char *operation = S_ISFIFO(inode->i_mode) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) "sticky_create_fifo" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) "sticky_create_regular";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
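/*
 * Userspace-visible effect of may_create_in_sticky() above (illustrative
 * scenario, hypothetical path): with fs.protected_fifos=1, an O_CREAT open
 * that lands on a pre-existing FIFO planted by another user in a sticky,
 * world-writable directory is refused with EACCES instead of silently writing
 * into that FIFO.  fs.protected_regular works the same way for regular files.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void demo(void)
 *	{
 *		// assume /tmp/report already exists as someone else's FIFO
 *		int fd = open("/tmp/report", O_CREAT | O_WRONLY, 0600);
 *
 *		if (fd < 0 && errno == EACCES)
 *			fprintf(stderr, "blocked by fs.protected_fifos\n");
 *		else if (fd >= 0)
 *			close(fd);
 *	}
 */
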
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * follow_up - Find the mountpoint of path's vfsmount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * Given a path, find the mountpoint of its source file system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * Replace @path with the path of the mountpoint in the parent mount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Up is towards /.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * Return 1 if we went up a level and 0 if we were already at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int follow_up(struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct mount *mnt = real_mount(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct mount *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) struct dentry *mountpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) read_seqlock_excl(&mount_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) parent = mnt->mnt_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (parent == mnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) read_sequnlock_excl(&mount_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) mntget(&parent->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) mountpoint = dget(mnt->mnt_mountpoint);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) read_sequnlock_excl(&mount_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) dput(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) path->dentry = mountpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) mntput(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) path->mnt = &parent->mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) EXPORT_SYMBOL(follow_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
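/*
 * Worked example for follow_up() (hypothetical mount layout): suppose
 * /dev/sdb1 is mounted on /mnt/usb and @path currently points at the root of
 * that mount:
 *
 *	path = { .mnt = <mount of /dev/sdb1>, .dentry = <root of /dev/sdb1> };
 *
 *	follow_up(&path);	// 1: path is now { parent mount, /mnt/usb dentry }
 *	follow_up(&path);	// climbs one more mount level per call; returns 0
 *				// and leaves path untouched once there is no parent
 */
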
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static bool choose_mountpoint_rcu(struct mount *m, const struct path *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct path *path, unsigned *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) while (mnt_has_parent(m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct dentry *mountpoint = m->mnt_mountpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) m = m->mnt_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (unlikely(root->dentry == mountpoint &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) root->mnt == &m->mnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (mountpoint != m->mnt.mnt_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) path->mnt = &m->mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) path->dentry = mountpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) *seqp = read_seqcount_begin(&mountpoint->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static bool choose_mountpoint(struct mount *m, const struct path *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) bool found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) unsigned seq, mseq = read_seqbegin(&mount_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) found = choose_mountpoint_rcu(m, root, path, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (unlikely(!found)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (!read_seqretry(&mount_lock, mseq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (likely(__legitimize_path(path, seq, mseq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) path_put(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * Perform an automount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * - return -EISDIR to tell __traverse_mounts() to stop and return the path we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * were called with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) static int follow_automount(struct path *path, int *count, unsigned lookup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct dentry *dentry = path->dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* We don't want to mount if someone's just doing a stat -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * unless they're stat'ing a directory and appended a '/' to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * the name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * We do, however, want to mount if someone wants to open or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * create a file of any type under the mountpoint, wants to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * traverse through the mountpoint or wants to open the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * mounted directory. Also, autofs may mark negative dentries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * as being automount points. These will need the attentions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * of the daemon to instantiate them before they can be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (!(lookup_flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) dentry->d_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (count && (*count)++ >= MAXSYMLINKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return -ELOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return finish_automount(dentry->d_op->d_automount(path), path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
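/*
 * Userspace view of the policy in follow_automount() above (illustrative;
 * /net/fileserver stands in for a hypothetical autofs-managed automount
 * point):
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	static void demo(void)
 *	{
 *		struct stat st;
 *		int fd;
 *
 *		stat("/net/fileserver", &st);	// plain stat: no mount triggered
 *		stat("/net/fileserver/", &st);	// trailing '/': LOOKUP_DIRECTORY,
 *						// so the automount is triggered
 *		fd = open("/net/fileserver/data", O_RDONLY);
 *		if (fd >= 0)			// traversing through the point
 *			close(fd);		// also triggers the mount
 *	}
 */
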
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * mount traversal - out-of-line part. One note on ->d_flags accesses -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * dentries are pinned but not locked here, so negative dentry can go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * positive right under us. Use of smp_load_acquire() provides a barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * sufficient for ->d_inode and ->d_flags consistency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static int __traverse_mounts(struct path *path, unsigned flags, bool *jumped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) int *count, unsigned lookup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct vfsmount *mnt = path->mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) bool need_mntput = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) while (flags & DCACHE_MANAGED_DENTRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /* Allow the filesystem to manage the transit without i_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * being held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (flags & DCACHE_MANAGE_TRANSIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) ret = path->dentry->d_op->d_manage(path, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) flags = smp_load_acquire(&path->dentry->d_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (flags & DCACHE_MOUNTED) { // something's mounted on it..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct vfsmount *mounted = lookup_mnt(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (mounted) { // ... in our namespace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) dput(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (need_mntput)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) mntput(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) path->mnt = mounted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) path->dentry = dget(mounted->mnt_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) // here we know it's positive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) flags = path->dentry->d_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) need_mntput = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!(flags & DCACHE_NEED_AUTOMOUNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) // uncovered automount point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ret = follow_automount(path, count, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) flags = smp_load_acquire(&path->dentry->d_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (ret == -EISDIR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) // possible if you race with several mount --move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (need_mntput && path->mnt == mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) mntput(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!ret && unlikely(d_flags_negative(flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) *jumped = need_mntput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static inline int traverse_mounts(struct path *path, bool *jumped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int *count, unsigned lookup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) unsigned flags = smp_load_acquire(&path->dentry->d_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* fastpath */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (likely(!(flags & DCACHE_MANAGED_DENTRY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) *jumped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (unlikely(d_flags_negative(flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return __traverse_mounts(path, flags, jumped, count, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int follow_down_one(struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct vfsmount *mounted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) mounted = lookup_mnt(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (mounted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) dput(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) mntput(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) path->mnt = mounted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) path->dentry = dget(mounted->mnt_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) EXPORT_SYMBOL(follow_down_one);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * Follow down to the covering mount currently visible to userspace. At each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * point, the filesystem owning that dentry may be queried as to whether the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * caller is permitted to proceed or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) int follow_down(struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct vfsmount *mnt = path->mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) bool jumped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) int ret = traverse_mounts(path, &jumped, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (path->mnt != mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) mntput(mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) EXPORT_SYMBOL(follow_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * we meet a managed dentry that would need blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct inode **inode, unsigned *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct dentry *dentry = path->dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) unsigned int flags = dentry->d_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (likely(!(flags & DCACHE_MANAGED_DENTRY)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (unlikely(nd->flags & LOOKUP_NO_XDEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * Don't forget we might have a non-mountpoint managed dentry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * that wants to block transit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (unlikely(flags & DCACHE_MANAGE_TRANSIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int res = dentry->d_op->d_manage(path, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return res == -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) flags = dentry->d_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (flags & DCACHE_MOUNTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct mount *mounted = __lookup_mnt(path->mnt, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (mounted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) path->mnt = &mounted->mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dentry = path->dentry = mounted->mnt.mnt_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) nd->flags |= LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) *seqp = read_seqcount_begin(&dentry->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) *inode = dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * We don't need to re-check ->d_seq after this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * ->d_inode read - there will be an RCU delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * between mount hash removal and ->mnt_root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * becoming unpinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) flags = dentry->d_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (read_seqretry(&mount_lock, nd->m_seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return !(flags & DCACHE_NEED_AUTOMOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct path *path, struct inode **inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) unsigned int *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) bool jumped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) path->mnt = nd->path.mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) path->dentry = dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) unsigned int seq = *seqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (unlikely(!*inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (unlazy_child(nd, dentry, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) // *path might've been clobbered by __follow_mount_rcu()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) path->mnt = nd->path.mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) path->dentry = dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (jumped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (unlikely(nd->flags & LOOKUP_NO_XDEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ret = -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) nd->flags |= LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) dput(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (path->mnt != nd->path.mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) mntput(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) *inode = d_backing_inode(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) *seqp = 0; /* out of RCU mode, so the value doesn't matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * This looks up the name in dcache and possibly revalidates the found dentry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * NULL is returned if the dentry does not exist in the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static struct dentry *lookup_dcache(const struct qstr *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct dentry *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct dentry *dentry = d_lookup(dir, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (dentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) int error = d_revalidate(dentry, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (unlikely(error <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) d_invalidate(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * Parent directory has its inode locked exclusive. This is the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * and only case when ->lookup() gets called on non-in-lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * dentries - as a matter of fact, this only gets called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * when the directory is guaranteed to have no in-lookup children
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static struct dentry *__lookup_hash(const struct qstr *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) struct dentry *base, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct dentry *dentry = lookup_dcache(name, base, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct dentry *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct inode *dir = base->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /* Don't create child dentry for a dead directory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (unlikely(IS_DEADDIR(dir)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dentry = d_alloc(base, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (unlikely(!dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) old = dir->i_op->lookup(dir, dentry, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (unlikely(old)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) dentry = old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static struct dentry *lookup_fast(struct nameidata *nd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct inode **inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) unsigned *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct dentry *dentry, *parent = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * Rename seqlock is not required here because in the off chance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * of a false negative due to a concurrent rename, the caller is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * going to fall back to non-racy lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) dentry = __d_lookup_rcu(parent, &nd->last, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (unlikely(!dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (!try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * This sequence count validates that the inode matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * the dentry name information from lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) *inode = d_backing_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * This sequence count validates that the parent had no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * changes while we did the lookup of the dentry above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * The memory barrier in read_seqcount_begin of child is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * enough, we can use __read_seqcount_retry here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) *seqp = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) status = d_revalidate(dentry, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (likely(status > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (unlazy_child(nd, dentry, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (unlikely(status == -ECHILD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /* we'd been told to redo it in non-rcu mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) status = d_revalidate(dentry, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) } else {
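		/*
		 * Non-RCU (ref-walk) case: plain dcache hash lookup under the
		 * parent's reference; d_revalidate() is allowed to block here.
		 */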
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) dentry = __d_lookup(parent, &nd->last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (unlikely(!dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) status = d_revalidate(dentry, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (unlikely(status <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) d_invalidate(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return ERR_PTR(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /* Fast lookup failed, do it the slow way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static struct dentry *__lookup_slow(const struct qstr *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct dentry *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct dentry *dentry, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct inode *inode = dir->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* Don't go there if it's already dead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (unlikely(IS_DEADDIR(inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) dentry = d_alloc_parallel(dir, name, &wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (unlikely(!d_in_lookup(dentry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) int error = d_revalidate(dentry, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (unlikely(error <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) d_invalidate(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) dentry = ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) old = inode->i_op->lookup(inode, dentry, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) d_lookup_done(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (unlikely(old)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) dentry = old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) static struct dentry *lookup_slow(const struct qstr *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct dentry *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) struct inode *inode = dir->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct dentry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) inode_lock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) res = __lookup_slow(name, dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) inode_unlock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
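/*
 * Check that we may traverse (MAY_EXEC) the directory we are currently in.
 * In RCU mode try a non-blocking check first; only if that would need to
 * block do we drop out of RCU mode and repeat the check the slow way.
 */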
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static inline int may_lookup(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (err != -ECHILD || !try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return inode_permission(nd->inode, MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (unlikely(nd->total_link_count++ >= MAXSYMLINKS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return -ELOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
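	/*
	 * The stack only needs to grow once: when we are about to exceed the
	 * EMBEDDED_LEVELS entries of nd->internal[] and have not switched to
	 * a heap-allocated stack yet.
	 */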
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (likely(nd->depth != EMBEDDED_LEVELS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (likely(nd->stack != nd->internal))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (likely(nd_alloc_stack(nd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) // we need to grab link before we do unlazy. And we can't skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) // unlazy even if we fail to grab the link - cleanup needs it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) bool grabbed_link = legitimize_path(nd, link, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!try_to_unlazy(nd) || !grabbed_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (nd_alloc_stack(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static const char *pick_link(struct nameidata *nd, struct path *link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct inode *inode, unsigned seq, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct saved *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) const char *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) int error = reserve_stack(nd, link, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (unlikely(error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (!(nd->flags & LOOKUP_RCU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) path_put(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) last = nd->stack + nd->depth++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) last->link = *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) clear_delayed_call(&last->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) last->seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (flags & WALK_TRAILING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) error = may_follow_link(nd, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) unlikely(link->mnt->mnt_flags & MNT_NOSYMFOLLOW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return ERR_PTR(-ELOOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (!(nd->flags & LOOKUP_RCU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) touch_atime(&last->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) } else if (atime_needs_update(&last->link, inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (!try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) touch_atime(&last->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) error = security_inode_follow_link(link->dentry, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) nd->flags & LOOKUP_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
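	/*
	 * If the filesystem keeps the symlink body cached in ->i_link we can
	 * use it directly; otherwise ask ->get_link(), falling out of RCU
	 * mode and retrying if the filesystem cannot do it lazily (-ECHILD).
	 */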
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) res = READ_ONCE(inode->i_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) const char * (*get)(struct dentry *, struct inode *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct delayed_call *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) get = inode->i_op->get_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) res = get(NULL, inode, &last->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (res == ERR_PTR(-ECHILD) && try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) res = get(link->dentry, inode, &last->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) res = get(link->dentry, inode, &last->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) goto all_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (IS_ERR(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (*res == '/') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) error = nd_jump_root(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) while (unlikely(*++res == '/'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (*res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) all_done: // pure jump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) put_link(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * Do we need to follow links? We _really_ want to be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * to do this check without having to look at inode->i_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * so we keep a cache of "no, this doesn't need follow_link"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * for the common case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static const char *step_into(struct nameidata *nd, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct dentry *dentry, struct inode *inode, unsigned seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) int err = handle_mounts(nd, dentry, &path, &inode, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (likely(!d_is_symlink(path.dentry)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ((flags & WALK_TRAILING) && !(nd->flags & LOOKUP_FOLLOW)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) (flags & WALK_NOFOLLOW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* not a symlink or should not follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!(nd->flags & LOOKUP_RCU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) dput(nd->path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (nd->path.mnt != path.mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) mntput(nd->path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) nd->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) nd->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) nd->seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* make sure that d_is_symlink above matches inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (read_seqcount_retry(&path.dentry->d_seq, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (path.mnt == nd->path.mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) mntget(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return pick_link(nd, &path, inode, seq, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
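/*
 * ".." while in RCU mode: if we are at the root of a mount, first step to
 * the mountpoint it is attached to (choose_mountpoint_rcu()), then move to
 * the parent dentry, validating the seqcounts so that a concurrent rename
 * or umount makes us fall back to ref-walk with -ECHILD.
 */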
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static struct dentry *follow_dotdot_rcu(struct nameidata *nd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) struct inode **inodep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) unsigned *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct dentry *parent, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (path_equal(&nd->path, &nd->root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) goto in_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (!choose_mountpoint_rcu(real_mount(nd->path.mnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) &nd->root, &path, &seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) goto in_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (unlikely(nd->flags & LOOKUP_NO_XDEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) nd->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) nd->inode = path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) nd->seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) /* we know that mountpoint was pinned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) old = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) parent = old->d_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) *inodep = parent->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) *seqp = read_seqcount_begin(&parent->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (unlikely(!path_connected(nd->path.mnt, parent)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) in_root:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (unlikely(nd->flags & LOOKUP_BENEATH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static struct dentry *follow_dotdot(struct nameidata *nd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct inode **inodep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) unsigned *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) struct dentry *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (path_equal(&nd->path, &nd->root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) goto in_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (unlikely(nd->path.dentry == nd->path.mnt->mnt_root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (!choose_mountpoint(real_mount(nd->path.mnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) &nd->root, &path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) goto in_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) path_put(&nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) nd->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) nd->inode = path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (unlikely(nd->flags & LOOKUP_NO_XDEV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return ERR_PTR(-EXDEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /* rare case of legitimate dget_parent()... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) parent = dget_parent(nd->path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (unlikely(!path_connected(nd->path.mnt, parent))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) dput(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) *seqp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) *inodep = parent->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) in_root:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (unlikely(nd->flags & LOOKUP_BENEATH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return ERR_PTR(-EXDEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) dget(nd->path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static const char *handle_dots(struct nameidata *nd, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (type == LAST_DOTDOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) const char *error = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct dentry *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (!nd->root.mnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) error = ERR_PTR(set_root(nd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (nd->flags & LOOKUP_RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) parent = follow_dotdot_rcu(nd, &inode, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) parent = follow_dotdot(nd, &inode, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (IS_ERR(parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return ERR_CAST(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (unlikely(!parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) error = step_into(nd, WALK_NOFOLLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) nd->path.dentry, nd->inode, nd->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) error = step_into(nd, WALK_NOFOLLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) parent, inode, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * If there was a racing rename or mount along our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) * path, then we can't be sure that ".." hasn't jumped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * above nd->root (and so userspace should retry or use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * some fallback).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static const char *walk_component(struct nameidata *nd, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) * "." and ".." are special - ".." especially so because it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * to be able to know about the current root directory and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * parent relationships.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (unlikely(nd->last_type != LAST_NORM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (!(flags & WALK_MORE) && nd->depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) put_link(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return handle_dots(nd, nd->last_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) dentry = lookup_fast(nd, &inode, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return ERR_CAST(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (unlikely(!dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return ERR_CAST(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (!(flags & WALK_MORE) && nd->depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) put_link(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return step_into(nd, flags, dentry, inode, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * We can do the critical dentry name comparison and hashing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * operations one word at a time, but we are limited to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) * - Architectures with fast unaligned word accesses. We could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * do a "get_unaligned()" if this helps and is sufficiently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * fast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * do not trap on the (extremely unlikely) case of a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * crossing operation).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) * - Furthermore, we need an efficient 64-bit compile for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * 64-bit case in order to generate the "number of bytes in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * the final mask". Again, that could be replaced with an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * efficient population count instruction or similar.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) #ifdef CONFIG_DCACHE_WORD_ACCESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) #include <asm/word-at-a-time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) #ifdef HASH_MIX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) #elif defined(CONFIG_64BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * Register pressure in the mixing function is an issue, particularly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * on 32-bit x86, but almost any function requires one state value and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * one temporary. Instead, use a function designed for two state values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * and no temporaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * This function cannot create a collision in only two iterations, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * we have two iterations to achieve avalanche. In those two iterations,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * we have six layers of mixing, which is enough to spread one bit's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * influence out to 2^6 = 64 state bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Rotate constants are scored by considering either 64 one-bit input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * probability of that delta causing a change to each of the 128 output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * bits, using a sample of random initial states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * The Shannon entropy of the computed probabilities is then summed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * to produce a score. Ideally, any input change has a 50% chance of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * toggling any given output bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * Mixing scores (in bits) for (12,45):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * Input delta: 1-bit 2-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * 1 round: 713.3 42542.6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * 2 rounds: 2753.7 140389.8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * 3 rounds: 5954.1 233458.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * 4 rounds: 7862.6 256672.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * Perfect: 8192 258048
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * (64*128) (64*63/2 * 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) #define HASH_MIX(x, y, a) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) ( x ^= (a), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) y ^= x, x = rol64(x,12),\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) x += y, y = rol64(y,45),\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) y *= 9 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * Fold two longs into one 32-bit hash value. This must be fast, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * latency isn't quite as critical, as there is a fair bit of additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) * work done before the hash value is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) static inline unsigned int fold_hash(unsigned long x, unsigned long y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) y ^= x * GOLDEN_RATIO_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) y *= GOLDEN_RATIO_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return y >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) #else /* 32-bit case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * Mixing scores (in bits) for (7,20):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * Input delta: 1-bit 2-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * 1 round: 330.3 9201.6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * 2 rounds: 1246.4 25475.4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * 3 rounds: 1907.1 31295.1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * 4 rounds: 2042.3 31718.6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * Perfect: 2048 31744
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) * (32*64) (32*31/2 * 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) #define HASH_MIX(x, y, a) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) ( x ^= (a), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) y ^= x, x = rol32(x, 7),\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) x += y, y = rol32(y,20),\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) y *= 9 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static inline unsigned int fold_hash(unsigned long x, unsigned long y)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /* Use arch-optimized multiply if one exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return __hash_32(y ^ __hash_32(x));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * Return the hash of a string of known length. This is carefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * designed to match hash_name(), which is the more critical function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * In particular, we must end by hashing a final word containing 0..7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * payload bytes, to match the way that hash_name() iterates until it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * finds the delimiter after the name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) unsigned long a, x = 0, y = (unsigned long)salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) a = load_unaligned_zeropad(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (len < sizeof(unsigned long))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) HASH_MIX(x, y, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) name += sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) len -= sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) x ^= a & bytemask_from_count(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return fold_hash(x, y);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) EXPORT_SYMBOL(full_name_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* Return the "hash_len" (hash and length) of a null-terminated string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) u64 hashlen_string(const void *salt, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) unsigned long a = 0, x = 0, y = (unsigned long)salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) unsigned long adata, mask, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) goto inside;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) HASH_MIX(x, y, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) len += sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) inside:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) a = load_unaligned_zeropad(name+len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) } while (!has_zero(a, &adata, &constants));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) adata = prep_zero_mask(a, adata, &constants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) mask = create_zero_mask(adata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) x ^= a & zero_bytemask(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return hashlen_create(fold_hash(x, y), len + find_zero(mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) EXPORT_SYMBOL(hashlen_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) * Calculate the length and hash of the path component, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * return the "hash_len" as the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static inline u64 hash_name(const void *salt, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) unsigned long a = 0, b, x = 0, y = (unsigned long)salt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) unsigned long adata, bdata, mask, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) goto inside;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) HASH_MIX(x, y, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) len += sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) inside:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) a = load_unaligned_zeropad(name+len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) b = a ^ REPEAT_BYTE('/');
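		/*
		 * A byte equal to '/' becomes zero in b, so has_zero(b) spots
		 * the separator just as has_zero(a) spots the terminating NUL.
		 */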
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) adata = prep_zero_mask(a, adata, &constants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) bdata = prep_zero_mask(b, bdata, &constants);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) mask = create_zero_mask(adata | bdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) x ^= a & zero_bytemask(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return hashlen_create(fold_hash(x, y), len + find_zero(mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) #else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) /* Return the hash of a string of known length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) unsigned long hash = init_name_hash(salt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) while (len--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) hash = partial_name_hash((unsigned char)*name++, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return end_name_hash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) EXPORT_SYMBOL(full_name_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) /* Return the "hash_len" (hash and length) of a null-terminated string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) u64 hashlen_string(const void *salt, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) unsigned long hash = init_name_hash(salt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) unsigned long len = 0, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) c = (unsigned char)*name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) while (c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) hash = partial_name_hash(c, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) c = (unsigned char)name[len];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return hashlen_create(end_name_hash(hash), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) EXPORT_SYMBOL(hashlen_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * We know there's a real path component here of at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * one character.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) static inline u64 hash_name(const void *salt, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) unsigned long hash = init_name_hash(salt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) unsigned long len = 0, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) c = (unsigned char)*name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) hash = partial_name_hash(c, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) c = (unsigned char)name[len];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) } while (c && c != '/');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return hashlen_create(end_name_hash(hash), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) #endif
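
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how hash_name() and the hashlen helpers fit together when splitting a path
 * into components.  It mirrors the loop in link_path_walk() below; the
 * function name is made up for the example and nothing calls it.  For a
 * single component (no '/'), hashlen_hash(hash_name(salt, comp)) is designed
 * to equal full_name_hash(salt, comp, strlen(comp)).
 */
static inline void example_split_components(const void *salt, const char *name)
{
	while (*name == '/')			/* skip leading slashes */
		name++;
	while (*name) {
		u64 hash_len = hash_name(salt, name);	/* hash + length of one component */
		unsigned int len = hashlen_len(hash_len);
		unsigned int hash = hashlen_hash(hash_len);

		(void)hash;			/* a real caller would look up the component here */
		name += len;			/* now at '\0' or at the '/' delimiter */
		while (*name == '/')		/* skip the separator(s) before the next component */
			name++;
	}
}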
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) * Name resolution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) * This is the basic name resolution function, turning a pathname into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * the final dentry. We expect the starting point (nd->path) to be positive and a directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * Returns 0 and nd will have valid dentry and mnt on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * Returns error and drops reference to input namei data on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static int link_path_walk(const char *name, struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) int depth = 0; // depth <= nd->depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) nd->last_type = LAST_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) nd->flags |= LOOKUP_PARENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (IS_ERR(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) return PTR_ERR(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) while (*name=='/')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) name++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (!*name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) /* At this point we know we have a real path component. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) for(;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) const char *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) u64 hash_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) err = may_lookup(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) hash_len = hash_name(nd->path.dentry, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) type = LAST_NORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (name[0] == '.') switch (hashlen_len(hash_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (name[1] == '.') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) type = LAST_DOTDOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) nd->flags |= LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) type = LAST_DOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (likely(type == LAST_NORM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) struct dentry *parent = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) nd->flags &= ~LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) struct qstr this = { { .hash_len = hash_len }, .name = name };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) err = parent->d_op->d_hash(parent, &this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) hash_len = this.hash_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) name = this.name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
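		/*
		 * hash_len packs the component length in the high 32 bits and
		 * the hash in the low 32 bits, so this single store fills in
		 * both qstr fields at once.
		 */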
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) nd->last.hash_len = hash_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) nd->last.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) nd->last_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) name += hashlen_len(hash_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (!*name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) goto OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * If it wasn't NUL, we know it was '/'. Skip that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * slash, and continue until no more slashes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) name++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) } while (unlikely(*name == '/'));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (unlikely(!*name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) /* pathname or trailing symlink, done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (!depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) nd->dir_uid = nd->inode->i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) nd->dir_mode = nd->inode->i_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) nd->flags &= ~LOOKUP_PARENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) /* last component of nested symlink */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) name = nd->stack[--depth].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) link = walk_component(nd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) /* not the last component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) link = walk_component(nd, WALK_MORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
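		/*
		 * walk_component() returns NULL when we simply moved to the
		 * next component, an ERR_PTR() on failure, or the body of a
		 * symlink that has to be resolved before we can go on.
		 */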
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (unlikely(link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (IS_ERR(link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return PTR_ERR(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /* a symlink to follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) nd->stack[depth++].name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) name = link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (unlikely(!d_can_lookup(nd->path.dentry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (!try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) /* must be paired with terminate_walk() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) static const char *path_init(struct nameidata *nd, unsigned flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) const char *s = nd->name->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (!*s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) flags &= ~LOOKUP_RCU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (flags & LOOKUP_RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) nd->flags = flags | LOOKUP_JUMPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) nd->depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) smp_rmb();
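	/*
	 * The mount_lock and rename_lock samples above let us notice, later
	 * in the walk, whether a concurrent mount or rename could have made
	 * our ".." handling escape the intended root (see handle_dots()).
	 */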
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (flags & LOOKUP_ROOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct dentry *root = nd->root.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct inode *inode = root->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (*s && unlikely(!d_can_lookup(root)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) return ERR_PTR(-ENOTDIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) nd->path = nd->root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) nd->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) nd->root_seq = nd->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) path_get(&nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) nd->root.mnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) nd->path.mnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) nd->path.dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) error = nd_jump_root(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /* Relative pathname -- get the starting-point it is relative to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) if (nd->dfd == AT_FDCWD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) struct fs_struct *fs = current->fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) seq = read_seqcount_begin(&fs->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) nd->path = fs->pwd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) nd->inode = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) } while (read_seqcount_retry(&fs->seq, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) get_fs_pwd(current->fs, &nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) nd->inode = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /* Caller must check execute permissions on the starting path component */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) struct fd f = fdget_raw(nd->dfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return ERR_PTR(-EBADF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) dentry = f.file->f_path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (*s && unlikely(!d_can_lookup(dentry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) return ERR_PTR(-ENOTDIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) nd->path = f.file->f_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) nd->inode = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) path_get(&nd->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) nd->inode = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) /* For scoped-lookups we need to set the root to the dirfd as well. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (flags & LOOKUP_IS_SCOPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) nd->root = nd->path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) nd->root_seq = nd->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) path_get(&nd->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) nd->flags |= LOOKUP_ROOT_GRABBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) static inline const char *lookup_last(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return walk_component(nd, WALK_TRAILING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static int handle_lookup_down(struct nameidata *nd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (!(nd->flags & LOOKUP_RCU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) dget(nd->path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return PTR_ERR(step_into(nd, WALK_NOFOLLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) nd->path.dentry, nd->inode, nd->seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) /* Returns 0 and nd will be valid on success; returns an error otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) const char *s = path_init(nd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) err = handle_lookup_down(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) s = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) while (!(err = link_path_walk(s, nd)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) (s = lookup_last(nd)) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) err = handle_lookup_down(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) err = complete_walk(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (!err && nd->flags & LOOKUP_DIRECTORY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) if (!d_can_lookup(nd->path.dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) err = -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) *path = nd->path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) nd->path.mnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) nd->path.dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) terminate_walk(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) int filename_lookup(int dfd, struct filename *name, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) struct path *path, struct path *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) struct nameidata nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (IS_ERR(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) return PTR_ERR(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (unlikely(root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) nd.root = *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) flags |= LOOKUP_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) set_nameidata(&nd, dfd, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (unlikely(retval == -ECHILD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) retval = path_lookupat(&nd, flags, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (unlikely(retval == -ESTALE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (likely(!retval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) audit_inode(name, path->dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) flags & LOOKUP_MOUNTPOINT ? AUDIT_INODE_NOEVAL : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) restore_nameidata();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) putname(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) /* Returns 0 and nd will be valid on success; returns an error otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static int path_parentat(struct nameidata *nd, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) struct path *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) const char *s = path_init(nd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) int err = link_path_walk(s, nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) err = complete_walk(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) *parent = nd->path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) nd->path.mnt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) nd->path.dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) terminate_walk(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) static struct filename *filename_parentat(int dfd, struct filename *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) unsigned int flags, struct path *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) struct qstr *last, int *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) struct nameidata nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (IS_ERR(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) return name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) set_nameidata(&nd, dfd, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (unlikely(retval == -ECHILD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) retval = path_parentat(&nd, flags, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (unlikely(retval == -ESTALE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (likely(!retval)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) *last = nd.last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) *type = nd.last_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) putname(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) name = ERR_PTR(retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) restore_nameidata();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /* does lookup, returns the object with parent locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct dentry *kern_path_locked(const char *name, struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) struct filename *filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) struct dentry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) struct qstr last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) &last, &type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (IS_ERR(filename))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return ERR_CAST(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) if (unlikely(type != LAST_NORM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) path_put(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) putname(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) d = __lookup_hash(&last, path->dentry, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) if (IS_ERR(d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) inode_unlock(path->dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) path_put(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) putname(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
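
/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): a caller of kern_path_locked() gets the child dentry back
 * and, in *path, the parent with its inode locked, and must undo all of it
 * when done.
 *
 *	struct path parent;
 *	struct dentry *child = kern_path_locked("/some/dir/child", &parent);
 *
 *	if (!IS_ERR(child)) {
 *		... act on child (it may be negative) ...
 *		dput(child);
 *		inode_unlock(d_inode(parent.dentry));
 *		path_put(&parent);
 *	}
 */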
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) int kern_path(const char *name, unsigned int flags, struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) return filename_lookup(AT_FDCWD, getname_kernel(name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) flags, path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) EXPORT_SYMBOL(kern_path);
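
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * usual calling pattern for kern_path().  On success the caller holds a
 * reference on both the mount and the dentry and must drop it with
 * path_put().  "example_resolve" is a made-up helper.
 *
 *	static int example_resolve(const char *pathname)
 *	{
 *		struct path path;
 *		int err = kern_path(pathname, LOOKUP_FOLLOW, &path);
 *
 *		if (err)
 *			return err;
 *		pr_info("resolved to inode %lu\n", d_inode(path.dentry)->i_ino);
 *		path_put(&path);
 *		return 0;
 *	}
 */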
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) * @dentry: pointer to dentry of the base directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * @mnt: pointer to vfs mount of the base directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * @name: pointer to file name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * @flags: lookup flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * @path: pointer to struct path to fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) const char *name, unsigned int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) struct path root = {.mnt = mnt, .dentry = dentry};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) /* the first argument of filename_lookup() is ignored with root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) return filename_lookup(AT_FDCWD, getname_kernel(name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) flags, path, &root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) EXPORT_SYMBOL(vfs_path_lookup);
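
/*
 * Illustrative sketch (assumption): vfs_path_lookup() resolves a name
 * relative to an explicit (mnt, dentry) base rather than the caller's cwd
 * or root; base_mnt and base_dentry below are hypothetical.
 *
 *	struct path result;
 *	int err = vfs_path_lookup(base_dentry, base_mnt, "sub/file",
 *				  LOOKUP_FOLLOW, &result);
 *	if (!err)
 *		path_put(&result);
 */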
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) static int lookup_one_len_common(const char *name, struct dentry *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) int len, struct qstr *this)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) this->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) this->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) this->hash = full_name_hash(base, name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (unlikely(name[0] == '.')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (len < 2 || (len == 2 && name[1] == '.'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) while (len--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) unsigned int c = *(const unsigned char *)name++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (c == '/' || c == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * See if the low-level filesystem might want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) * to use its own hash..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (base->d_flags & DCACHE_OP_HASH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) int err = base->d_op->d_hash(base, this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) return inode_permission(base->d_inode, MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) * try_lookup_one_len - filesystem helper to lookup single pathname component
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * @name: pathname component to lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * @base: base directory to lookup from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * @len: length of the name; @name need not be NUL-terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * Look up a dentry by name in the dcache, returning NULL if it does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * currently exist. The function does not try to create a dentry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) * Note that this routine is purely a helper for filesystem usage and should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * not be called by generic code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) * The caller must hold base->i_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) struct qstr this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) WARN_ON_ONCE(!inode_is_locked(base->d_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) err = lookup_one_len_common(name, base, len, &this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) return lookup_dcache(&this, base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) EXPORT_SYMBOL(try_lookup_one_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) * lookup_one_len - filesystem helper to lookup single pathname component
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * @name: pathname component to lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * @base: base directory to lookup from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) * @len: length of the name; @name need not be NUL-terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) * Note that this routine is purely a helper for filesystem usage and should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * not be called by generic code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) * The caller must hold base->i_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) struct qstr this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) WARN_ON_ONCE(!inode_is_locked(base->d_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) err = lookup_one_len_common(name, base, len, &this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) dentry = lookup_dcache(&this, base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return dentry ? dentry : __lookup_slow(&this, base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) EXPORT_SYMBOL(lookup_one_len);
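
/*
 * Illustrative sketch (assumption): looking up one child of a directory
 * whose inode the caller already holds locked, as required here.  Unlike
 * try_lookup_one_len(), this falls back to the filesystem's ->lookup() if
 * the name is not in the dcache.
 *
 *	inode_lock(d_inode(parent));
 *	child = lookup_one_len("child", parent, strlen("child"));
 *	if (!IS_ERR(child)) {
 *		... child->d_inode is NULL if the name does not exist ...
 *		dput(child);
 *	}
 *	inode_unlock(d_inode(parent));
 */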
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * lookup_one_len_unlocked - filesystem helper to lookup single pathname component
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * @name: pathname component to lookup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * @base: base directory to lookup from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * @len: length of the name; @name need not be NUL-terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * Note that this routine is purely a helper for filesystem usage and should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * not be called by generic code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * Unlike lookup_one_len, it should be called without the parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * i_mutex held, and will take the i_mutex itself if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) struct dentry *lookup_one_len_unlocked(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) struct dentry *base, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) struct qstr this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct dentry *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) err = lookup_one_len_common(name, base, len, &this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) ret = lookup_dcache(&this, base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) ret = lookup_slow(&this, base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) EXPORT_SYMBOL(lookup_one_len_unlocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) * Like lookup_one_len_unlocked(), except that it yields ERR_PTR(-ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * on negatives. Returns known positive or ERR_PTR(); that's what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * most of the users want. Note that pinned negative with unlocked parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * _can_ become positive at any time, so callers of lookup_one_len_unlocked()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * need to be very careful; pinned positives have ->d_inode stable, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * this one avoids such problems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) struct dentry *lookup_positive_unlocked(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) struct dentry *base, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) struct dentry *ret = lookup_one_len_unlocked(name, base, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) dput(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) ret = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) EXPORT_SYMBOL(lookup_positive_unlocked);
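
/*
 * Illustrative sketch (assumption): the practical difference between the
 * unlocked helpers.  lookup_one_len_unlocked() may hand back a negative
 * dentry, while lookup_positive_unlocked() folds that case into
 * ERR_PTR(-ENOENT), which is what most callers want.
 *
 *	dentry = lookup_positive_unlocked("victim", parent, strlen("victim"));
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);		(covers "does not exist" too)
 *	... use dentry->d_inode, which is stable for a pinned positive ...
 *	dput(dentry);
 */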
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) #ifdef CONFIG_UNIX98_PTYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) int path_pts(struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* Find something mounted on "pts" in the same directory as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * the input path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) struct dentry *parent = dget_parent(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct dentry *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) struct qstr this = QSTR_INIT("pts", 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (unlikely(!path_connected(path->mnt, parent))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) dput(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) dput(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) path->dentry = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) child = d_hash_and_lookup(parent, &this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (!child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) path->dentry = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) dput(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) follow_down(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) struct path *path, int *empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) return filename_lookup(dfd, getname_flags(name, flags, empty),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) flags, path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) EXPORT_SYMBOL(user_path_at_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) int __check_sticky(struct inode *dir, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) kuid_t fsuid = current_fsuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) if (uid_eq(inode->i_uid, fsuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (uid_eq(dir->i_uid, fsuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) EXPORT_SYMBOL(__check_sticky);
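
/*
 * Worked example (assumption): in a mode-1777 directory such as /tmp,
 * check_sticky() makes may_delete() below refuse to remove a file owned by
 * another user unless the caller owns the file, owns the directory, or has
 * CAP_FOWNER relative to the inode; __check_sticky() returns non-zero
 * exactly in that "refuse" case.
 */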
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) * Check whether we can remove a link victim from directory dir and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) *  whether the type of the victim is right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) * 1. We can't do it if dir is read-only (done in permission())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * 2. We should have write and exec permissions on dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * 3. We can't remove anything from append-only dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * 4. We can't do anything with immutable dir (done in permission())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) * 5. If the sticky bit on dir is set we should either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * a. be owner of dir, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) * b. be owner of victim, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) * c. have CAP_FOWNER capability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) * 6. If the victim is append-only or immutable we can't do anything with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * links pointing to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * 7. If the victim has an unknown uid or gid we can't change the inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * 8. If we were asked to remove a directory and victim isn't one - ENOTDIR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * 9. If we were asked to remove a non-directory and victim isn't one - EISDIR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * 10. We can't remove a root or mountpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) * 11. We don't allow removal of NFS sillyrenamed files; it's handled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) * nfs_async_unlink().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct inode *inode = d_backing_inode(victim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (d_is_negative(victim))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) BUG_ON(!inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) BUG_ON(victim->d_parent->d_inode != dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) /* Inode writeback is not safe when the uid or gid are invalid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) if (IS_APPEND(dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (check_sticky(dir, inode) || IS_APPEND(inode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (isdir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (!d_is_dir(victim))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) return -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (IS_ROOT(victim))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) } else if (d_is_dir(victim))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) return -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (IS_DEADDIR(dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (victim->d_flags & DCACHE_NFSFS_RENAMED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) }
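
/*
 * Illustrative sketch (assumption, heavily simplified): how the unlink path
 * uses may_delete().  The parent's inode lock is taken further up the call
 * chain; the real vfs_unlink() also locks the victim and runs the security
 * hooks before calling into the filesystem.
 *
 *	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
 *	error = may_delete(d_inode(parent), victim, false);
 *	if (!error)
 *		error = d_inode(parent)->i_op->unlink(d_inode(parent), victim);
 *	inode_unlock(d_inode(parent));
 */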
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) /* Check whether we can create an object with dentry child in directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) * dir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) * 1. We can't do it if child already exists (open has special treatment for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) * this case, but since we are inlined it's OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) * 2. We can't do it if dir is read-only (done in permission())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) * 3. We can't do it if the fs can't represent the fsuid or fsgid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) * 4. We should have write and exec permissions on dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) * 5. We can't do it if dir is immutable (done in permission())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) static inline int may_create(struct inode *dir, struct dentry *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) struct user_namespace *s_user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) if (child->d_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if (IS_DEADDIR(dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) s_user_ns = dir->i_sb->s_user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) !kgid_has_mapping(s_user_ns, current_fsgid()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return inode_permission(dir, MAY_WRITE | MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) * p1 and p2 should be directories on the same fs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct dentry *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (p1 == p2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) p = d_ancestor(p2, p1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) p = d_ancestor(p1, p2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) EXPORT_SYMBOL(lock_rename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) void unlock_rename(struct dentry *p1, struct dentry *p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) inode_unlock(p1->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (p1 != p2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) inode_unlock(p2->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) EXPORT_SYMBOL(unlock_rename);
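
/*
 * Illustrative sketch (assumption): the usual pairing around a
 * cross-directory rename.  lock_rename() returns the "trap" dentry when one
 * parent is an ancestor of the other; a rename to or from the trap must be
 * refused, and unlock_rename() must always follow.
 *
 *	trap = lock_rename(new_parent, old_parent);
 *	if (old_dentry != trap && new_dentry != trap)
 *		error = ... perform the rename ...;
 *	unlock_rename(new_parent, old_parent);
 */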
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) bool want_excl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) int error = may_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (!dir->i_op->create)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) return -EACCES; /* shouldn't it be ENOSYS? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) mode &= S_IALLUGO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) mode |= S_IFREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) error = security_inode_create(dir, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) error = dir->i_op->create(dir, dentry, mode, want_excl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) fsnotify_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) EXPORT_SYMBOL_NS(vfs_create, ANDROID_GKI_VFS_EXPORT_ONLY);
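
/*
 * Illustrative sketch (assumption): creating a regular file from inside the
 * kernel, given a negative dentry obtained under the parent's inode lock
 * (for instance via lookup_one_len()).
 *
 *	dentry = lookup_one_len("newfile", parent, strlen("newfile"));
 *	if (!IS_ERR(dentry)) {
 *		err = vfs_create(d_inode(parent), dentry, 0600, true);
 *		dput(dentry);
 *	}
 */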
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) int vfs_mkobj(struct dentry *dentry, umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) int (*f)(struct dentry *, umode_t, void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) struct inode *dir = dentry->d_parent->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) int error = may_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) mode &= S_IALLUGO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) mode |= S_IFREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) error = security_inode_create(dir, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) error = f(dentry, mode, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) fsnotify_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) EXPORT_SYMBOL(vfs_mkobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) bool may_open_dev(const struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) return !(path->mnt->mnt_flags & MNT_NODEV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) !(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) static int may_open(const struct path *path, int acc_mode, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) struct dentry *dentry = path->dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) struct inode *inode = dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) switch (inode->i_mode & S_IFMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) case S_IFLNK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) return -ELOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) case S_IFDIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (acc_mode & MAY_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) return -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (acc_mode & MAY_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) case S_IFBLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) case S_IFCHR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) if (!may_open_dev(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) case S_IFIFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) case S_IFSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) if (acc_mode & MAY_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) flag &= ~O_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) case S_IFREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) if ((acc_mode & MAY_EXEC) && path_noexec(path))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) error = inode_permission(inode, MAY_OPEN | acc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * An append-only file must be opened in append mode for writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) if (IS_APPEND(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) if (flag & O_TRUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) /* O_NOATIME can only be set by the owner or superuser */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (flag & O_NOATIME && !inode_owner_or_capable(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
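
/*
 * Worked example (assumption): with IS_APPEND() set on the inode, the checks
 * above make open(..., O_WRONLY) and open(..., O_WRONLY | O_TRUNC) fail with
 * -EPERM, while open(..., O_WRONLY | O_APPEND) passes this function (subject
 * to the ordinary permission check).
 */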
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) static int handle_truncate(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) const struct path *path = &filp->f_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct inode *inode = path->dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) int error = get_write_access(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) * Refuse to truncate files with mandatory locks held on them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) error = locks_verify_locked(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) error = security_path_truncate(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) error = do_truncate(path->dentry, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) put_write_access(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) static inline int open_to_namei_flags(int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if ((flag & O_ACCMODE) == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) flag--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) return flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
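
/*
 * Note (assumption): an access-mode value of 3 in the low bits (neither
 * O_RDONLY, O_WRONLY nor O_RDWR) is decremented to 2 here, so the flags
 * handed to the filesystem's ->atomic_open() always carry a valid
 * O_ACCMODE value (O_RDWR).
 */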
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) struct user_namespace *s_user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) int error = security_path_mknod(dir, dentry, mode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) s_user_ns = dir->dentry->d_sb->s_user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) !kgid_has_mapping(s_user_ns, current_fsgid()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) return -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) return security_inode_create(dir->dentry->d_inode, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) * Attempt to atomically look up, create and open a file from a negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) * dentry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) * Returns the resulting dentry if successful. The file will have been created and attached to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) * @file by the filesystem calling finish_open().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) * If the file was looked up only or didn't need creating, FMODE_OPENED won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * be set. The caller will need to perform the open themselves; the returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) * dentry points to the new name and may be negative.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) * An ERR_PTR() is returned on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) static struct dentry *atomic_open(struct nameidata *nd, struct dentry *dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) int open_flag, umode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) struct inode *dir = nd->path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (nd->flags & LOOKUP_DIRECTORY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) open_flag |= O_DIRECTORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) file->f_path.dentry = DENTRY_NOT_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) file->f_path.mnt = nd->path.mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) error = dir->i_op->atomic_open(dir, dentry, file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) open_to_namei_flags(open_flag), mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) d_lookup_done(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (file->f_mode & FMODE_OPENED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (unlikely(dentry != file->f_path.dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) dentry = dget(file->f_path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) } else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (file->f_path.dentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) dentry = file->f_path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (unlikely(d_is_negative(dentry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) dentry = ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) * Look up and maybe create and open the last component.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) * Must be called with parent locked (exclusive in O_CREAT case).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) * Returns the looked-up (or newly created) dentry on success, that is, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) * the file was successfully atomically created (if necessary) and opened, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) * the file was not completely opened at this time, though lookups and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * creations were performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) * These cases are distinguished by the presence of FMODE_OPENED in file->f_mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) * In the latter case the returned dentry might be negative if O_CREAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) * hadn't been specified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) * An ERR_PTR() is returned on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) const struct open_flags *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) bool got_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) struct dentry *dir = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) struct inode *dir_inode = dir->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) int open_flag = op->open_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) int error, create_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) umode_t mode = op->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) if (unlikely(IS_DEADDIR(dir_inode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) file->f_mode &= ~FMODE_CREATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) dentry = d_lookup(dir, &nd->last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) if (!dentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) dentry = d_alloc_parallel(dir, &nd->last, &wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) if (d_in_lookup(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) error = d_revalidate(dentry, nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (likely(error > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) d_invalidate(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) if (dentry->d_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) /* Cached positive dentry: will open in f_op->open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) * Checking write permission is tricky, because we don't know if we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) * going to actually need it: O_CREAT opens should work as long as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) * file exists. But checking existence breaks atomicity. The trick is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) * to check access and if not granted clear O_CREAT from the flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) * Another problem is returning the "right" error value (e.g. for an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) * O_EXCL open we want to return EEXIST not EROFS).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if (unlikely(!got_write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) open_flag &= ~O_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (open_flag & O_CREAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (open_flag & O_EXCL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) open_flag &= ~O_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) if (!IS_POSIXACL(dir->d_inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) mode &= ~current_umask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (likely(got_write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) create_error = may_o_create(&nd->path, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) create_error = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (create_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) open_flag &= ~O_CREAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) if (dir_inode->i_op->atomic_open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) dentry = atomic_open(nd, dentry, file, open_flag, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) if (unlikely(create_error) && dentry == ERR_PTR(-ENOENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) dentry = ERR_PTR(create_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (d_in_lookup(dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) nd->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) d_lookup_done(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (unlikely(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (IS_ERR(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) error = PTR_ERR(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) dentry = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) /* Negative dentry, just create the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if (!dentry->d_inode && (open_flag & O_CREAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) file->f_mode |= FMODE_CREATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (!dir_inode->i_op->create) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) error = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) error = dir_inode->i_op->create(dir_inode, dentry, mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) open_flag & O_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (unlikely(create_error) && !dentry->d_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) error = create_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) out_dput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
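/*
 * Handle the last component of an open(): a fast (possibly RCU) lookup
 * for plain opens, or a locked lookup_open() when O_CREAT is involved.
 * Returns the name of a trailing symlink to follow, NULL once the last
 * component has been handled, or an ERR_PTR() on failure.
 */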
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) static const char *open_last_lookups(struct nameidata *nd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) struct file *file, const struct open_flags *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) struct dentry *dir = nd->path.dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) int open_flag = op->open_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) bool got_write = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) unsigned seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) const char *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) nd->flags |= op->intent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (nd->last_type != LAST_NORM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) if (nd->depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) put_link(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) return handle_dots(nd, nd->last_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) if (!(open_flag & O_CREAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) if (nd->last.name[nd->last.len])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) /* we _can_ be in RCU mode here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) dentry = lookup_fast(nd, &inode, &seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) return ERR_CAST(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (likely(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) goto finish_lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) BUG_ON(nd->flags & LOOKUP_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) /* create side of things */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) if (nd->flags & LOOKUP_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (!try_to_unlazy(nd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) /* trailing slashes? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (unlikely(nd->last.name[nd->last.len]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) return ERR_PTR(-EISDIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) got_write = !mnt_want_write(nd->path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) * do _not_ fail yet - we might not need that or fail with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) * a different error; let lookup_open() decide; we'll be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) * dropping this one anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (open_flag & O_CREAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) inode_lock(dir->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) inode_lock_shared(dir->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) dentry = lookup_open(nd, file, op, got_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) if (!IS_ERR(dentry) && (file->f_mode & FMODE_CREATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) fsnotify_create(dir->d_inode, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (open_flag & O_CREAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) inode_unlock(dir->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) inode_unlock_shared(dir->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if (got_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) mnt_drop_write(nd->path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) return ERR_CAST(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) if (file->f_mode & (FMODE_OPENED | FMODE_CREATED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) dput(nd->path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) nd->path.dentry = dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) finish_lookup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (nd->depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) put_link(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) res = step_into(nd, WALK_TRAILING, dentry, inode, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) if (unlikely(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) * Handle the last step of open()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) static int do_open(struct nameidata *nd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) struct file *file, const struct open_flags *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) int open_flag = op->open_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) bool do_truncate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) int acc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) if (!(file->f_mode & (FMODE_OPENED | FMODE_CREATED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) error = complete_walk(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (!(file->f_mode & FMODE_CREATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) audit_inode(nd->name, nd->path.dentry, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (open_flag & O_CREAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if ((open_flag & O_EXCL) && !(file->f_mode & FMODE_CREATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) if (d_is_dir(nd->path.dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) return -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) error = may_create_in_sticky(nd->dir_mode, nd->dir_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) d_backing_inode(nd->path.dentry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) return -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) do_truncate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) acc_mode = op->acc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) if (file->f_mode & FMODE_CREATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) /* Don't check for write permission, don't truncate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) open_flag &= ~O_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) acc_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) } else if (d_is_reg(nd->path.dentry) && open_flag & O_TRUNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) error = mnt_want_write(nd->path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) do_truncate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) error = may_open(&nd->path, acc_mode, open_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (!error && !(file->f_mode & FMODE_OPENED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) error = vfs_open(&nd->path, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) error = ima_file_check(file, op->acc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) if (!error && do_truncate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) error = handle_truncate(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) if (unlikely(error > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (do_truncate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) mnt_drop_write(nd->path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
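/*
 * Create an unnamed temporary file below the directory given by @dentry.
 * The directory must grant write and exec permission and its filesystem
 * must implement ->tmpfile().  Unless O_EXCL was requested the new inode
 * is marked I_LINKABLE so it can later be linked into the namespace.
 * Returns the new child dentry or an ERR_PTR().
 */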
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) struct dentry *child = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) struct inode *dir = dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) /* we want the directory to be writable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) error = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) if (!dir->i_op->tmpfile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) child = d_alloc(dentry, &slash_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) if (unlikely(!child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) error = dir->i_op->tmpfile(dir, child, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) inode = child->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) if (unlikely(!inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (!(open_flag & O_EXCL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) inode->i_state |= I_LINKABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) ima_post_create_tmpfile(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) return child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) dput(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) EXPORT_SYMBOL(vfs_tmpfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
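/*
 * O_TMPFILE: resolve the directory, take a write reference on its mount,
 * create the anonymous file with vfs_tmpfile() and open it.
 */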
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) static int do_tmpfile(struct nameidata *nd, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) const struct open_flags *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) struct dentry *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) error = mnt_want_write(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) child = vfs_tmpfile(path.dentry, op->mode, op->open_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) error = PTR_ERR(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (IS_ERR(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) dput(path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) path.dentry = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) audit_inode(nd->name, child, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) /* Don't check for other permissions, the inode was just created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) error = may_open(&path, 0, op->open_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) file->f_path.mnt = path.mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) error = finish_open(file, child, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) mnt_drop_write(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) path_put(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
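/*
 * O_PATH: just resolve the path and attach it to the file;
 * no creation, no truncation.
 */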
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) int error = path_lookupat(nd, flags, &path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) audit_inode(nd->name, path.dentry, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) error = vfs_open(&path, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) path_put(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
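/*
 * Core of every open(): allocate the struct file, then take the
 * O_TMPFILE, O_PATH or regular pathwalk route.  -EOPENSTALE from the
 * walk is translated to -ECHILD (retry without RCU) or -ESTALE (retry
 * with LOOKUP_REVAL) for the caller.
 */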
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) static struct file *path_openat(struct nameidata *nd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) const struct open_flags *op, unsigned flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) file = alloc_empty_file(op->open_flag, current_cred());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) if (IS_ERR(file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) return file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (unlikely(file->f_flags & __O_TMPFILE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) error = do_tmpfile(nd, flags, op, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) } else if (unlikely(file->f_flags & O_PATH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) error = do_o_path(nd, flags, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) const char *s = path_init(nd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) while (!(error = link_path_walk(s, nd)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) (s = open_last_lookups(nd, file, op)) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) error = do_open(nd, file, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) terminate_walk(nd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) if (likely(!error)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) if (likely(file->f_mode & FMODE_OPENED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) return file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) fput(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (error == -EOPENSTALE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) if (flags & LOOKUP_RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) error = -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) error = -ESTALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
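/*
 * Attempt the open in RCU (lazy) mode first, fall back to ref-walk on
 * -ECHILD, and force revalidation with LOOKUP_REVAL on -ESTALE.
 */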
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) struct file *do_filp_open(int dfd, struct filename *pathname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) const struct open_flags *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) struct nameidata nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) int flags = op->lookup_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) struct file *filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) set_nameidata(&nd, dfd, pathname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) filp = path_openat(&nd, op, flags | LOOKUP_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) if (unlikely(filp == ERR_PTR(-ECHILD)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) filp = path_openat(&nd, op, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) if (unlikely(filp == ERR_PTR(-ESTALE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) restore_nameidata();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) return filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) const char *name, const struct open_flags *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) struct nameidata nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) struct filename *filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) int flags = op->lookup_flags | LOOKUP_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) nd.root.mnt = mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) nd.root.dentry = dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) return ERR_PTR(-ELOOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) filename = getname_kernel(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) if (IS_ERR(filename))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) return ERR_CAST(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) set_nameidata(&nd, -1, filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) file = path_openat(&nd, op, flags | LOOKUP_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) if (unlikely(file == ERR_PTR(-ECHILD)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) file = path_openat(&nd, op, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if (unlikely(file == ERR_PTR(-ESTALE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) file = path_openat(&nd, op, flags | LOOKUP_REVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) restore_nameidata();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) putname(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) return file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
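/*
 * Resolve the parent of @name, lock it and return a negative dentry for
 * the final component, ready for a create-type operation.  On success a
 * write reference on the mount is held as well; the caller undoes all of
 * it with done_path_create().
 */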
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) static struct dentry *filename_create(int dfd, struct filename *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) struct path *path, unsigned int lookup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) struct dentry *dentry = ERR_PTR(-EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) struct qstr last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) int err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) * other flags passed in are ignored!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) lookup_flags &= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (IS_ERR(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) return ERR_CAST(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) * Yucky last component or no last component at all?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) * (foo/., foo/.., /////)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) if (unlikely(type != LAST_NORM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) /* don't fail immediately if it's r/o, at least try to report other errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) err2 = mnt_want_write(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) * Do the final lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) dentry = __lookup_hash(&last, path->dentry, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) error = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) if (d_is_positive(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) * Special case - lookup gave negative, but... we had foo/bar/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) * From the vfs_mknod() POV we just have a negative dentry -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) * all is fine. Let's be bastards - you had / on the end, you've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) * been asking for a (non-existent) directory. -ENOENT for you.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) if (unlikely(!is_dir && last.name[last.len])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) if (unlikely(err2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) error = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) putname(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) dentry = ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) inode_unlock(path->dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (!err2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) mnt_drop_write(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) path_put(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) putname(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) return dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) struct dentry *kern_path_create(int dfd, const char *pathname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) struct path *path, unsigned int lookup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) return filename_create(dfd, getname_kernel(pathname),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) path, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) EXPORT_SYMBOL(kern_path_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
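/* Undo filename_create(): drop the child, unlock and release the parent. */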
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) void done_path_create(struct path *path, struct dentry *dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) inode_unlock(path->dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) mnt_drop_write(path->mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) path_put(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) EXPORT_SYMBOL(done_path_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) inline struct dentry *user_path_create(int dfd, const char __user *pathname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) struct path *path, unsigned int lookup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) return filename_create(dfd, getname(pathname), path, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) EXPORT_SYMBOL(user_path_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
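/*
 * Create a device node, FIFO, socket or regular file at @dentry in @dir.
 * Creating a character or block device node (other than a whiteout)
 * requires CAP_MKNOD.
 */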
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) bool is_whiteout = S_ISCHR(mode) && dev == WHITEOUT_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) int error = may_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) if ((S_ISCHR(mode) || S_ISBLK(mode)) && !is_whiteout &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) !capable(CAP_MKNOD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) if (!dir->i_op->mknod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) error = devcgroup_inode_mknod(mode, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) error = security_inode_mknod(dir, dentry, mode, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) error = dir->i_op->mknod(dir, dentry, mode, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) fsnotify_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) EXPORT_SYMBOL(vfs_mknod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) static int may_mknod(umode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) switch (mode & S_IFMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) case S_IFREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) case S_IFCHR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) case S_IFBLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) case S_IFIFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) case S_IFSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) case 0: /* zero mode translates to S_IFREG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) case S_IFDIR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) static long do_mknodat(int dfd, const char __user *filename, umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) unsigned int dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) unsigned int lookup_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) error = may_mknod(mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) dentry = user_path_create(dfd, filename, &path, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) return PTR_ERR(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if (!IS_POSIXACL(path.dentry->d_inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) mode &= ~current_umask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) error = security_path_mknod(&path, dentry, mode, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) switch (mode & S_IFMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) case 0: case S_IFREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) error = vfs_create(path.dentry->d_inode, dentry, mode, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) ima_post_path_mknod(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) case S_IFCHR: case S_IFBLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) error = vfs_mknod(path.dentry->d_inode, dentry, mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) new_decode_dev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) case S_IFIFO: case S_IFSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) error = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) done_path_create(&path, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (retry_estale(error, lookup_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) lookup_flags |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) unsigned int, dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) return do_mknodat(dfd, filename, mode, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) return do_mknodat(AT_FDCWD, filename, mode, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)
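/*
 * Create a directory at @dentry in @dir.  The mode is limited to the
 * permission bits plus the sticky bit, and the parent's link count is
 * checked against the superblock's s_max_links limit.
 */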
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) int error = may_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) unsigned max_links = dir->i_sb->s_max_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) if (!dir->i_op->mkdir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) mode &= (S_IRWXUGO|S_ISVTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) error = security_inode_mkdir(dir, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) if (max_links && dir->i_nlink >= max_links)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) return -EMLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) error = dir->i_op->mkdir(dir, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) fsnotify_mkdir(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) EXPORT_SYMBOL_NS(vfs_mkdir, ANDROID_GKI_VFS_EXPORT_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) static long do_mkdirat(int dfd, const char __user *pathname, umode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) unsigned int lookup_flags = LOOKUP_DIRECTORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) dentry = user_path_create(dfd, pathname, &path, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) return PTR_ERR(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) if (!IS_POSIXACL(path.dentry->d_inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) mode &= ~current_umask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) error = security_path_mkdir(&path, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) done_path_create(&path, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) if (retry_estale(error, lookup_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) lookup_flags |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) return do_mkdirat(dfd, pathname, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) return do_mkdirat(AT_FDCWD, pathname, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
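/*
 * Remove the directory at @dentry from @dir.  Fails with -EBUSY if the
 * victim is a mountpoint in the current namespace; on success the inode
 * is marked S_DEAD, lingering mounts on it are detached and the dentry
 * is deleted.
 */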
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) int vfs_rmdir(struct inode *dir, struct dentry *dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) int error = may_delete(dir, dentry, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) if (!dir->i_op->rmdir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) dget(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) inode_lock(dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) if (is_local_mountpoint(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) error = security_inode_rmdir(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) error = dir->i_op->rmdir(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) shrink_dcache_parent(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) dentry->d_inode->i_flags |= S_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) dont_mount(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) detach_mounts(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) inode_unlock(dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) d_delete_notify(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) EXPORT_SYMBOL_NS(vfs_rmdir, ANDROID_GKI_VFS_EXPORT_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
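/*
 * Consumes @name.  "." as the last component is rejected with -EINVAL,
 * ".." with -ENOTEMPTY and the root with -EBUSY.
 */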
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) long do_rmdir(int dfd, struct filename *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) struct qstr last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) unsigned int lookup_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) name = filename_parentat(dfd, name, lookup_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) &path, &last, &type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) if (IS_ERR(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) return PTR_ERR(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) case LAST_DOTDOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) error = -ENOTEMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) case LAST_DOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) case LAST_ROOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) error = mnt_want_write(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) dentry = __lookup_hash(&last, path.dentry, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) error = PTR_ERR(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) if (!dentry->d_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) goto exit3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) error = security_path_rmdir(&path, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) goto exit3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) error = vfs_rmdir(path.dentry->d_inode, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) exit3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) exit2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) inode_unlock(path.dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) mnt_drop_write(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) exit1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) path_put(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) if (retry_estale(error, lookup_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) lookup_flags |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) putname(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) return do_rmdir(AT_FDCWD, getname(pathname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) * vfs_unlink - unlink a filesystem object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) * @dir: parent directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) * @dentry: victim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) * @delegated_inode: returns victim inode, if the inode is delegated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) * The caller must hold dir->i_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) * return a reference to the inode in delegated_inode. The caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) * should then break the delegation on that inode and retry. Because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) * breaking a delegation may take a long time, the caller should drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) * dir->i_mutex before doing so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) * Alternatively, a caller may pass NULL for delegated_inode. This may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) * be appropriate for callers that expect the underlying filesystem not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) * to be NFS exported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) struct inode *target = dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) int error = may_delete(dir, dentry, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (!dir->i_op->unlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) inode_lock(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) if (is_local_mountpoint(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) error = security_inode_unlink(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) error = try_break_deleg(target, delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) error = dir->i_op->unlink(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) dont_mount(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) detach_mounts(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) inode_unlock(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) /* We don't d_delete() NFS sillyrenamed files--they still exist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) fsnotify_unlink(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) } else if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) fsnotify_link_count(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) d_delete_notify(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) EXPORT_SYMBOL_NS(vfs_unlink, ANDROID_GKI_VFS_EXPORT_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) * Make sure that the actual truncation of the file will occur outside its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) * directory's i_mutex. Truncate can take a long time if there is a lot of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) * writeout happening, and we don't want to prevent access to the directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) * while waiting on the I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) long do_unlinkat(int dfd, struct filename *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) struct qstr last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) struct inode *delegated_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) unsigned int lookup_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) name = filename_parentat(dfd, name, lookup_flags, &path, &last, &type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) if (IS_ERR(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) return PTR_ERR(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) error = -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) if (type != LAST_NORM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) error = mnt_want_write(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) retry_deleg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) dentry = __lookup_hash(&last, path.dentry, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) error = PTR_ERR(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (!IS_ERR(dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) /* Why not before? Because we want the correct error value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) if (last.name[last.len])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) goto slashes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) inode = dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) if (d_is_negative(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) goto slashes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) ihold(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) error = security_path_unlink(&path, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) exit2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) dput(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) inode_unlock(path.dentry->d_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) if (inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) iput(inode); /* truncate the inode here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) if (delegated_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) error = break_deleg_wait(&delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) goto retry_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) mnt_drop_write(path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) exit1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) path_put(&path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) if (retry_estale(error, lookup_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) lookup_flags |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) putname(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) slashes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) if (d_is_negative(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) else if (d_is_dir(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) error = -EISDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) error = -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) if ((flag & ~AT_REMOVEDIR) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) if (flag & AT_REMOVEDIR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) return do_rmdir(dfd, getname(pathname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) return do_unlinkat(dfd, getname(pathname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) SYSCALL_DEFINE1(unlink, const char __user *, pathname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) return do_unlinkat(AT_FDCWD, getname(pathname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) int error = may_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) if (!dir->i_op->symlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) error = security_inode_symlink(dir, dentry, oldname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) error = dir->i_op->symlink(dir, dentry, oldname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) fsnotify_create(dir, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) EXPORT_SYMBOL(vfs_symlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) static long do_symlinkat(const char __user *oldname, int newdfd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) const char __user *newname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) struct filename *from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) struct dentry *dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) unsigned int lookup_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) from = getname(oldname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) if (IS_ERR(from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) return PTR_ERR(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) dentry = user_path_create(newdfd, newname, &path, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) error = PTR_ERR(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) if (IS_ERR(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) goto out_putname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) error = security_path_symlink(&path, dentry, from->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) done_path_create(&path, dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) if (retry_estale(error, lookup_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) lookup_flags |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) out_putname:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) putname(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) int, newdfd, const char __user *, newname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) return do_symlinkat(oldname, newdfd, newname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) return do_symlinkat(oldname, AT_FDCWD, newname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) * vfs_link - create a new link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) * @old_dentry: object to be linked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) * @dir: new parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) * @new_dentry: where to create the new link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) * @delegated_inode: returns inode needing a delegation break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) * The caller must hold dir->i_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) * If vfs_link discovers a delegation on the to-be-linked file in need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) * of breaking, it will return -EWOULDBLOCK and return a reference to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) * inode in delegated_inode. The caller should then break the delegation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) * and retry. Because breaking a delegation may take a long time, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) * caller should drop the i_mutex before doing so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) * Alternatively, a caller may pass NULL for delegated_inode. This may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) * be appropriate for callers that expect the underlying filesystem not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) * to be NFS exported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) struct inode *inode = old_dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) unsigned max_links = dir->i_sb->s_max_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) error = may_create(dir, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (dir->i_sb != inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) * A link to an append-only or immutable file cannot be created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) * Updating the link count will likely cause i_uid and i_gid to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) * be written back improperly if their true value is unknown to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) * the vfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) if (HAS_UNMAPPED_ID(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) if (!dir->i_op->link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) if (S_ISDIR(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) error = security_inode_link(old_dentry, dir, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) /* Make sure we don't allow creating a hardlink to an unlinked file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) else if (max_links && inode->i_nlink >= max_links)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) error = -EMLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) error = try_break_deleg(inode, delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) error = dir->i_op->link(old_dentry, dir, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) if (!error && (inode->i_state & I_LINKABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) inode->i_state &= ~I_LINKABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) fsnotify_link(dir, inode, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) EXPORT_SYMBOL_NS(vfs_link, ANDROID_GKI_VFS_EXPORT_ONLY);
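
/*
 * A minimal sketch (not compiled) of the delegation-break retry pattern
 * described in the comment above; lock()/unlock() are placeholders for
 * whatever takes and drops the parent's i_mutex. Compare do_linkat() below.
 *
 *	struct inode *delegated_inode = NULL;
 *	int error;
 * retry:
 *	lock(dir);
 *	error = vfs_link(old_dentry, dir, new_dentry, &delegated_inode);
 *	unlock(dir);
 *	if (delegated_inode) {
 *		error = break_deleg_wait(&delegated_inode);
 *		if (!error)
 *			goto retry;
 *	}
 */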
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) * Hardlinks are often used in delicate situations. We avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) * security-related surprises by not following symlinks on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) * newname. --KAB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) * We don't follow them on the oldname either to be compatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) * with linux 2.0, and to avoid hard-linking to directories
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) * and other special files. --ADM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) static int do_linkat(int olddfd, const char __user *oldname, int newdfd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) const char __user *newname, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) struct dentry *new_dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) struct path old_path, new_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) struct inode *delegated_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) int how = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) * To use null names we require CAP_DAC_READ_SEARCH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) * This ensures that not everyone will be able to create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) * a hard link using the passed file descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) if (flags & AT_EMPTY_PATH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) if (!capable(CAP_DAC_READ_SEARCH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) how = LOOKUP_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) if (flags & AT_SYMLINK_FOLLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) how |= LOOKUP_FOLLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) error = user_path_at(olddfd, oldname, how, &old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) new_dentry = user_path_create(newdfd, newname, &new_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) (how & LOOKUP_REVAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) error = PTR_ERR(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) if (IS_ERR(new_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) error = -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if (old_path.mnt != new_path.mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) error = may_linkat(&old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) if (unlikely(error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) error = security_path_link(old_path.dentry, &new_path, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) out_dput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) done_path_create(&new_path, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) if (delegated_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) error = break_deleg_wait(&delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) path_put(&old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) if (retry_estale(error, how)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) path_put(&old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) how |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) path_put(&old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) int, newdfd, const char __user *, newname, int, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) return do_linkat(olddfd, oldname, newdfd, newname, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) * vfs_rename - rename a filesystem object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) * @old_dir: parent of source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) * @old_dentry: source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) * @new_dir: parent of destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) * @new_dentry: destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) * @delegated_inode: returns an inode needing a delegation break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) * @flags: rename flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) * The caller must hold multiple mutexes--see lock_rename().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) * If vfs_rename discovers a delegation in need of breaking at either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) * the source or destination, it will return -EWOULDBLOCK and return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) * reference to the inode in delegated_inode. The caller should then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) * break the delegation and retry. Because breaking a delegation may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) * take a long time, the caller should drop all locks before doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) * so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) * Alternatively, a caller may pass NULL for delegated_inode. This may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) * be appropriate for callers that expect the underlying filesystem not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) * to be NFS exported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) * The worst of all namespace operations - renaming a directory. "Perverted"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) * Problems:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) * a) we can get into loop creation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) * b) race potential - two innocent renames can create a loop together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) * That's where 4.4 screws up. Current fix: serialization on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) * story.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) * c) we have to lock _four_ objects - parents and victim (if it exists),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) * and source (if it is not a directory).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) * And that - after we got ->i_mutex on parents (until then we don't know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) * whether the target exists). Solution: try to be smart with locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) * order for inodes. We rely on the fact that tree topology may change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) * only under ->s_vfs_rename_mutex _and_ that parent of the object we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) * move will be locked. Thus we can rank directories by the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) * (ancestors first) and rank all non-directories after them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) * That works since everybody except rename does "lock parent, lookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) * lock child" and rename is under ->s_vfs_rename_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) * HOWEVER, it relies on the assumption that any object with ->lookup()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) * has no more than 1 dentry. If "hybrid" objects ever appear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) * we'd better make sure that there's no link(2) for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) * d) conversion from fhandle to dentry may come at the wrong moment - when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) * we are removing the target. Solution: we will have to grab ->i_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) * ->i_mutex on parents, which works but leads to some truly excessive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) * locking].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) struct inode *new_dir, struct dentry *new_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) struct inode **delegated_inode, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) bool is_dir = d_is_dir(old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) struct inode *source = old_dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) struct inode *target = new_dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) bool new_is_dir = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) unsigned max_links = new_dir->i_sb->s_max_links;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) struct name_snapshot old_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) if (source == target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) error = may_delete(old_dir, old_dentry, is_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) if (!target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) error = may_create(new_dir, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) new_is_dir = d_is_dir(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (!(flags & RENAME_EXCHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) error = may_delete(new_dir, new_dentry, is_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) error = may_delete(new_dir, new_dentry, new_is_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (!old_dir->i_op->rename)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) * If we are going to change the parent - check write permissions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) * we'll need to flip '..'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) if (new_dir != old_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (is_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) error = inode_permission(source, MAY_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) if ((flags & RENAME_EXCHANGE) && new_is_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) error = inode_permission(target, MAY_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) take_dentry_name_snapshot(&old_name, old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) dget(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) if (!is_dir || (flags & RENAME_EXCHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) lock_two_nondirectories(source, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) else if (target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) inode_lock(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) if (max_links && new_dir != old_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) error = -EMLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) old_dir->i_nlink >= max_links)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) if (!is_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) error = try_break_deleg(source, delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) if (target && !new_is_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) error = try_break_deleg(target, delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) error = old_dir->i_op->rename(old_dir, old_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) new_dir, new_dentry, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) if (!(flags & RENAME_EXCHANGE) && target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) if (is_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) shrink_dcache_parent(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) target->i_flags |= S_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) dont_mount(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) detach_mounts(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) if (!(flags & RENAME_EXCHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) d_move(old_dentry, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) d_exchange(old_dentry, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) if (!is_dir || (flags & RENAME_EXCHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) unlock_two_nondirectories(source, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) else if (target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) inode_unlock(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) dput(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) fsnotify_move(old_dir, new_dir, &old_name.name, is_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (flags & RENAME_EXCHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) fsnotify_move(new_dir, old_dir, &old_dentry->d_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) new_is_dir, NULL, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) release_dentry_name_snapshot(&old_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) EXPORT_SYMBOL_NS(vfs_rename, ANDROID_GKI_VFS_EXPORT_ONLY);
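
/*
 * A condensed sketch (not compiled) of the caller sequence the comment
 * above describes, with old_parent/new_parent standing for the already
 * pinned parent dentries; compare do_renameat2() below.
 *
 *	trap = lock_rename(new_parent, old_parent);
 *	old_dentry = __lookup_hash(&old_last, old_parent, lookup_flags);
 *	new_dentry = __lookup_hash(&new_last, new_parent, lookup_flags);
 *	(neither dentry may equal the "trap" returned by lock_rename())
 *	error = vfs_rename(old_parent->d_inode, old_dentry,
 *			   new_parent->d_inode, new_dentry,
 *			   &delegated_inode, flags);
 *	unlock_rename(new_parent, old_parent);
 */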
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) static int do_renameat2(int olddfd, const char __user *oldname, int newdfd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) const char __user *newname, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) struct dentry *old_dentry, *new_dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) struct dentry *trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) struct path old_path, new_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) struct qstr old_last, new_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) int old_type, new_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) struct inode *delegated_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) struct filename *from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) struct filename *to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) bool should_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) (flags & RENAME_EXCHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) if (flags & RENAME_EXCHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) target_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) from = filename_parentat(olddfd, getname(oldname), lookup_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) &old_path, &old_last, &old_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) if (IS_ERR(from)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) error = PTR_ERR(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) to = filename_parentat(newdfd, getname(newname), lookup_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) &new_path, &new_last, &new_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) if (IS_ERR(to)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) error = PTR_ERR(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) goto exit1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) error = -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) if (old_path.mnt != new_path.mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) if (old_type != LAST_NORM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) if (flags & RENAME_NOREPLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) error = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) if (new_type != LAST_NORM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) error = mnt_want_write(old_path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) goto exit2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) retry_deleg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) trap = lock_rename(new_path.dentry, old_path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) error = PTR_ERR(old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) if (IS_ERR(old_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) goto exit3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) /* source must exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) if (d_is_negative(old_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) goto exit4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) error = PTR_ERR(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) if (IS_ERR(new_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) goto exit4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) error = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) if (flags & RENAME_EXCHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) if (d_is_negative(new_dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) if (!d_is_dir(new_dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) error = -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) if (new_last.name[new_last.len])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) /* unless the source is a directory, trailing slashes give -ENOTDIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) if (!d_is_dir(old_dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) error = -ENOTDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) if (old_last.name[old_last.len])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) /* source should not be ancestor of target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) if (old_dentry == trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) /* target should not be an ancestor of source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) if (!(flags & RENAME_EXCHANGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) error = -ENOTEMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) if (new_dentry == trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) error = security_path_rename(&old_path, old_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) &new_path, new_dentry, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) goto exit5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) error = vfs_rename(old_path.dentry->d_inode, old_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) new_path.dentry->d_inode, new_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) &delegated_inode, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) exit5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) dput(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) exit4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) dput(old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) exit3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) unlock_rename(new_path.dentry, old_path.dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) if (delegated_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) error = break_deleg_wait(&delegated_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) goto retry_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) mnt_drop_write(old_path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) exit2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) if (retry_estale(error, lookup_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) should_retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) path_put(&new_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) putname(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) exit1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) path_put(&old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) putname(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) if (should_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) should_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) lookup_flags |= LOOKUP_REVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) int, newdfd, const char __user *, newname, unsigned int, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) return do_renameat2(olddfd, oldname, newdfd, newname, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) int, newdfd, const char __user *, newname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) return do_renameat2(olddfd, oldname, newdfd, newname, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) return do_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580)
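/*
 * Copy a symlink body into a userspace buffer. Like readlink(2) itself,
 * this silently truncates to buflen and does not append a trailing NUL.
 */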
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) int readlink_copy(char __user *buffer, int buflen, const char *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) int len = PTR_ERR(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) if (IS_ERR(link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) len = strlen(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) if (len > (unsigned) buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) len = buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) if (copy_to_user(buffer, link, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) len = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) * vfs_readlink - copy symlink body into userspace buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) * @dentry: dentry on which to get symbolic link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) * @buffer: user memory pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) * @buflen: size of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) * Does not touch atime. That's up to the caller if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) * Does not call security hook.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) DEFINE_DELAYED_CALL(done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) const char *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612)
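/*
 * IOP_DEFAULT_READLINK caches the outcome of the checks below, so
 * repeated readlink() calls on the same inode can go straight to
 * i_link / ->get_link().
 */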
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) if (unlikely(inode->i_op->readlink))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) return inode->i_op->readlink(dentry, buffer, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) if (!d_is_symlink(dentry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) inode->i_opflags |= IOP_DEFAULT_READLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) link = READ_ONCE(inode->i_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) if (!link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) link = inode->i_op->get_link(dentry, inode, &done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) if (IS_ERR(link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) return PTR_ERR(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) res = readlink_copy(buffer, buflen, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) do_delayed_call(&done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) EXPORT_SYMBOL(vfs_readlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) * vfs_get_link - get symlink body
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) * @dentry: dentry on which to get symbolic link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) * @done: caller needs to free returned data with this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) * Calls security hook and i_op->get_link() on the supplied inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) * It does not touch atime. That's up to the caller if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) * Does not work on "special" symlinks like /proc/$$/fd/N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) const char *res = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) struct inode *inode = d_inode(dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) if (d_is_symlink(dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) res = ERR_PTR(security_inode_readlink(dentry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) res = inode->i_op->get_link(dentry, inode, done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) EXPORT_SYMBOL(vfs_get_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) /* get the link contents into pagecache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) const char *page_get_link(struct dentry *dentry, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) struct delayed_call *callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669)
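/*
 * A NULL dentry means we were called in RCU-walk mode and must not
 * sleep: use only what is already uptodate in the page cache, and
 * return -ECHILD so the caller drops back to ref-walk.
 */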
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) if (!dentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) page = find_get_page(mapping, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) return ERR_PTR(-ECHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) page = read_mapping_page(mapping, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) return (char*)page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) set_delayed_call(callback, page_put_link, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) kaddr = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) return kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) EXPORT_SYMBOL(page_get_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) void page_put_link(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) put_page(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) EXPORT_SYMBOL(page_put_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) DEFINE_DELAYED_CALL(done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) int res = readlink_copy(buffer, buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) page_get_link(dentry, d_inode(dentry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) &done));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) do_delayed_call(&done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) EXPORT_SYMBOL(page_readlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) void *fsdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) unsigned int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) if (nofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) flags |= AOP_FLAG_NOFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) err = pagecache_write_begin(NULL, mapping, 0, len-1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) flags, &page, &fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) memcpy(page_address(page), symname, len-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) page, fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) goto fail;
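/*
 * ->write_end() may accept fewer bytes than we asked for; if the
 * copy came up short, start the whole write over.
 */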
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) if (err < len-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) mark_inode_dirty(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) EXPORT_SYMBOL(__page_symlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) int page_symlink(struct inode *inode, const char *symname, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) return __page_symlink(inode, symname, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) !mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) EXPORT_SYMBOL(page_symlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) const struct inode_operations page_symlink_inode_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) .get_link = page_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) EXPORT_SYMBOL(page_symlink_inode_operations);