Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits | 0 Branches | 0 Tags

Blame view of fs/exec.c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *  linux/fs/exec.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  Copyright (C) 1991, 1992  Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * #!-checking implemented by tytso.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * Demand-loading implemented 01.12.91 - no need to read anything but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * the header into memory. The inode of the executable is put into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * "current->executable", and page faults do the actual loading. Clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * Once more I can proudly say that linux stood up to being changed: it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * was less than 2 hours work to get demand-loading completely implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * current->executable is only used by the procfs.  This allows a dispatch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * table to check for several different types  of binary formats.  We keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * trying until we recognize the file or we run out of supported binary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  * formats.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/kernel_read_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/fdtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/vmacache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/fcntl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/sched/coredump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/sched/numa_balancing.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <linux/key.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/personality.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/binfmts.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/utsname.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/pid_namespace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/namei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/security.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <linux/tsacct_kern.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <linux/cn_proc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <linux/audit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include <linux/tracehook.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include <linux/kmod.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #include <linux/fsnotify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #include <linux/fs_struct.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #include <linux/oom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #include <linux/io_uring.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) #include <trace/events/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #include "internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #include <trace/events/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) EXPORT_TRACEPOINT_SYMBOL_GPL(task_rename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) static int bprm_creds_from_file(struct linux_binprm *bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) int suid_dumpable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) static LIST_HEAD(formats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) static DEFINE_RWLOCK(binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) void __register_binfmt(struct linux_binfmt * fmt, int insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	BUG_ON(!fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	if (WARN_ON(!fmt->load_binary))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	write_lock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	insert ? list_add(&fmt->lh, &formats) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		 list_add_tail(&fmt->lh, &formats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	write_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) EXPORT_SYMBOL(__register_binfmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) void unregister_binfmt(struct linux_binfmt * fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	write_lock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	list_del(&fmt->lh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	write_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) EXPORT_SYMBOL(unregister_binfmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) static inline void put_binfmt(struct linux_binfmt * fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	module_put(fmt->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) bool path_noexec(const struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) #ifdef CONFIG_USELIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  * Note that a shared library must be both readable and executable for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  * security reasons.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  * Also note that we take the load address from the file itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) SYSCALL_DEFINE1(uselib, const char __user *, library)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 	struct linux_binfmt *fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	struct filename *tmp = getname(library);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 	int error = PTR_ERR(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 	static const struct open_flags uselib_flags = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 		.acc_mode = MAY_READ | MAY_EXEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 		.intent = LOOKUP_OPEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 		.lookup_flags = LOOKUP_FOLLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	if (IS_ERR(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	putname(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	error = PTR_ERR(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	if (IS_ERR(file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	 * may_open() has already checked for this, so it should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	 * impossible to trip now. But we need to be extra cautious
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	 * and check again at the very end too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	error = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 			 path_noexec(&file->f_path)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	fsnotify_open(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	error = -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	read_lock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	list_for_each_entry(fmt, &formats, lh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		if (!fmt->load_shlib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		if (!try_module_get(fmt->module))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		read_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 		error = fmt->load_shlib(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 		read_lock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 		put_binfmt(fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 		if (error != -ENOEXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	read_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	fput(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) #endif /* #ifdef CONFIG_USELIB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  * The nascent bprm->mm is not visible until exec_mmap(), but it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  * use a lot of memory, so account these pages in current->mm temporarily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)  * change the counter back via acct_arg_size(0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	long diff = (long)(pages - bprm->vma_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	if (!mm || !diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	bprm->vma_pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	add_mm_counter(mm, MM_ANONPAGES, diff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 		int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	unsigned int gup_flags = FOLL_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) #ifdef CONFIG_STACK_GROWSUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 		ret = expand_downwards(bprm->vma, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 		gup_flags |= FOLL_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	 * We are doing an exec().  'current' is the process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	 * doing the exec and bprm->mm is the new process's mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 			&page, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		acct_arg_size(bprm, vma_pages(bprm->vma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) static void put_arg_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	put_user_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) static void free_arg_pages(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 		struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) static int __bprm_mm_init(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	struct vm_area_struct *vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	struct mm_struct *mm = bprm->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	bprm->vma = vma = vm_area_alloc(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	if (!vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	vma_set_anonymous(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	if (mmap_write_lock_killable(mm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		err = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 		goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	 * Place the stack at the largest stack address the architecture
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	 * supports. Later, we'll move this to an appropriate place. We don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	 * use STACK_TOP because that can depend on attributes which aren't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	 * configured yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	vma->vm_end = STACK_TOP_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	vma->vm_start = vma->vm_end - PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	err = insert_vm_struct(mm, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	mm->stack_vm = mm->total_vm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	bprm->p = vma->vm_end - sizeof(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	bprm->vma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	vm_area_free(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) static bool valid_arg_len(struct linux_binprm *bprm, long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	return len <= MAX_ARG_STRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 		int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	page = bprm->page[pos / PAGE_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	if (!page && write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 		bprm->page[pos / PAGE_SIZE] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) static void put_arg_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) static void free_arg_page(struct linux_binprm *bprm, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	if (bprm->page[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 		__free_page(bprm->page[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 		bprm->page[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) static void free_arg_pages(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	for (i = 0; i < MAX_ARG_PAGES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		free_arg_page(bprm, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 		struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) static int __bprm_mm_init(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) static bool valid_arg_len(struct linux_binprm *bprm, long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	return len <= bprm->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) #endif /* CONFIG_MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  * Create a new mm_struct and populate it with a temporary stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361)  * vm_area_struct.  We don't have enough context at this point to set the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * flags, permissions, and offset, so we use temporary values.  We'll update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * them later in setup_arg_pages().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) static int bprm_mm_init(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	struct mm_struct *mm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	bprm->mm = mm = mm_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	if (!mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	/* Save current stack limit for all calculations made during exec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	task_lock(current->group_leader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	task_unlock(current->group_leader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	err = __bprm_mm_init(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	if (mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		bprm->mm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 		mmdrop(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) struct user_arg_ptr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	bool is_compat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		const char __user *const __user *native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		const compat_uptr_t __user *compat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	} ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	const char __user *native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	if (unlikely(argv.is_compat)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		compat_uptr_t compat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		if (get_user(compat, argv.ptr.compat + nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 			return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		return compat_ptr(compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	if (get_user(native, argv.ptr.native + nr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 		return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	return native;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429)  * count() counts the number of strings in array ARGV.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) static int count(struct user_arg_ptr argv, int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	if (argv.ptr.native != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 			const char __user *p = get_user_arg_ptr(argv, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 			if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			if (i >= max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			++i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			if (fatal_signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 				return -ERESTARTNOHAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) static int count_strings_kernel(const char *const *argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (!argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	for (i = 0; argv[i]; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		if (i >= MAX_ARG_STRINGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 			return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		if (fatal_signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			return -ERESTARTNOHAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) static int bprm_stack_limits(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	unsigned long limit, ptr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	 * (whichever is smaller) for the argv+env strings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	 * This ensures that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	 *  - the remaining binfmt code will not run out of stack space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	 *  - the program will have a reasonable amount of stack left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	 *    to work from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	limit = _STK_LIM / 4 * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	 * We've historically supported up to 32 pages (ARG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	 * of argument strings even with small stacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	limit = max_t(unsigned long, limit, ARG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	 * We must account for the size of all the argv and envp pointers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	 * the argv and envp strings, since they will also take up space in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	 * the stack. They aren't stored until much later when we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	 * signal to the parent that the child has run out of stack space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	 * Instead, calculate it here so it's possible to fail gracefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	 * In the case of argc = 0, make sure there is space for adding an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	 * empty string (which will bump argc to 1), to ensure confused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	 * userspace programs don't start processing from argv[1], thinking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	 * argc can never be 0, to keep them from walking envp by accident.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	 * See do_execveat_common().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	if (limit <= ptr_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	limit -= ptr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	bprm->argmin = bprm->p - limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  * 'copy_strings()' copies argument/environment strings from the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  * process's memory to the new process's stack.  The call to get_user_pages()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  * ensures the destination page is created and not swapped out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) static int copy_strings(int argc, struct user_arg_ptr argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 			struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct page *kmapped_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	char *kaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	unsigned long kpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	while (argc-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		const char __user *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		unsigned long pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		str = get_user_arg_ptr(argv, argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		if (IS_ERR(str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		len = strnlen_user(str, MAX_ARG_STRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		ret = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		if (!valid_arg_len(bprm, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		/* We're going to work our way backwards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		pos = bprm->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		str += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		bprm->p -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		if (bprm->p < bprm->argmin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 			int offset, bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 				ret = -ERESTARTNOHAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			offset = pos % PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			if (offset == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 				offset = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 			bytes_to_copy = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 			if (bytes_to_copy > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 				bytes_to_copy = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			offset -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 			pos -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 			str -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			len -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 				struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 				page = get_arg_page(bprm, pos, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 				if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 					ret = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 				if (kmapped_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 					flush_kernel_dcache_page(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 					kunmap(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 					put_arg_page(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 				kmapped_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 				kaddr = kmap(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 				kpos = pos & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 				flush_arg_page(bprm, kpos, kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 				ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	if (kmapped_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		flush_kernel_dcache_page(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		kunmap(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		put_arg_page(kmapped_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613)  * Copy an argument/environment string from the kernel to the process's stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	unsigned long pos = bprm->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	if (!valid_arg_len(bprm, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	/* We're going to work our way backwards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	arg += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	bprm->p -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		unsigned int bytes_to_copy = min_t(unsigned int, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 				min_not_zero(offset_in_page(pos), PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		pos -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		arg -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		len -= bytes_to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		page = get_arg_page(bprm, pos, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		flush_arg_page(bprm, pos & PAGE_MASK, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		flush_kernel_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		put_arg_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) EXPORT_SYMBOL(copy_string_kernel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) static int copy_strings_kernel(int argc, const char *const *argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			       struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	while (argc-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		int ret = copy_string_kernel(argv[argc], bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		if (fatal_signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			return -ERESTARTNOHAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673)  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674)  * the binfmt code determines where the new stack should reside, we shift it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675)  * its final location.  The process proceeds as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  * 1) Use shift to calculate the new vma endpoints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  * 2) Extend vma to cover both the old and new ranges.  This ensures the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  *    arguments passed to subsequent functions are consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  * 3) Move vma's page tables to the new range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681)  * 4) Free up any cleared pgd range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682)  * 5) Shrink the vma to cover only the new range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	unsigned long old_start = vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	unsigned long old_end = vma->vm_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	unsigned long length = old_end - old_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	unsigned long new_start = old_start - shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	unsigned long new_end = old_end - shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	struct mmu_gather tlb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	BUG_ON(new_start > new_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	 * ensure there are no vmas between where we want to go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	 * and where we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	if (vma != find_vma(mm, new_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	 * cover the whole range: [new_start, old_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	 * move the page tables downwards; on failure we rely on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	 * process cleanup to remove whatever mess we made.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (length != move_page_tables(vma, old_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 				       vma, new_start, length, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	lru_add_drain();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	tlb_gather_mmu(&tlb, mm, old_start, old_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	if (new_end > old_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		 * when the old and new regions overlap, clear from new_end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		free_pgd_range(&tlb, new_end, old_end, new_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		 * otherwise, clean from old_start; this is done to not touch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		 * the address space in [new_end, old_start), since some architectures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		 * have constraints on va-space that make this illegal (IA64) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		 * for the others it's just a little faster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		free_pgd_range(&tlb, old_start, old_end, new_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	tlb_finish_mmu(&tlb, old_start, old_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * Shrink the vma to just the new range.  Always succeeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  * the stack is optionally relocated, and some extra space is added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) int setup_arg_pages(struct linux_binprm *bprm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		    unsigned long stack_top,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		    int executable_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	unsigned long stack_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	struct vm_area_struct *vma = bprm->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	struct vm_area_struct *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	unsigned long vm_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	unsigned long stack_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	unsigned long stack_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	unsigned long stack_expand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	unsigned long rlim_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) #ifdef CONFIG_STACK_GROWSUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	/* Limit stack size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	stack_base = bprm->rlim_stack.rlim_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (stack_base > STACK_SIZE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		stack_base = STACK_SIZE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	/* Add space for stack randomization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	stack_base += (STACK_RND_MASK << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	/* Make sure we didn't let the argument array grow too large. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (vma->vm_end - vma->vm_start > stack_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	stack_base = PAGE_ALIGN(stack_top - stack_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	stack_shift = vma->vm_start - stack_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	mm->arg_start = bprm->p - stack_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	bprm->p = vma->vm_end - stack_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	stack_top = arch_align_stack(stack_top);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	stack_top = PAGE_ALIGN(stack_top);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (unlikely(stack_top < mmap_min_addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	stack_shift = vma->vm_end - stack_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	bprm->p -= stack_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	mm->arg_start = bprm->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (bprm->loader)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		bprm->loader -= stack_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	bprm->exec -= stack_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	if (mmap_write_lock_killable(mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	vm_flags = VM_STACK_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	 * Adjust stack execute permissions; explicitly enable for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	 * (arch default) otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		vm_flags |= VM_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	else if (executable_stack == EXSTACK_DISABLE_X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		vm_flags &= ~VM_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	vm_flags |= mm->def_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	vm_flags |= VM_STACK_INCOMPLETE_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			vm_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	BUG_ON(prev != vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (unlikely(vm_flags & VM_EXEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		pr_warn_once("process '%pD4' started with executable stack\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			     bprm->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	/* Move stack pages down in memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (stack_shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		ret = shift_arg_pages(vma, stack_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/* mprotect_fixup is overkill to remove the temporary stack flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	stack_expand = 131072UL; /* arbitrarily 32*4k (or 2*64k) pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	stack_size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	 * Align this down to a page boundary as expand_stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	 * will align it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) #ifdef CONFIG_STACK_GROWSUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (stack_size + stack_expand > rlim_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		stack_base = vma->vm_start + rlim_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		stack_base = vma->vm_end + stack_expand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (stack_size + stack_expand > rlim_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		stack_base = vma->vm_end - rlim_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		stack_base = vma->vm_start - stack_expand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	current->mm->start_stack = bprm->p;
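	/*
	 * Pre-grow the stack VMA to stack_base so the new program starts
	 * with the stack_expand headroom computed above, but never more
	 * than RLIMIT_STACK in total.
	 */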
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	ret = expand_stack(vma, stack_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	mmap_write_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) EXPORT_SYMBOL(setup_arg_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * Transfer the program arguments and environment from the holding pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * onto the stack. The provided stack pointer is adjusted accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) int transfer_args_to_stack(struct linux_binprm *bprm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			   unsigned long *sp_location)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	unsigned long index, stop, sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	stop = bprm->p >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	sp = *sp_location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
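	/*
	 * Copy the argument pages, from the top one (MAX_ARG_PAGES - 1)
	 * down to the page holding bprm->p, to just below the supplied
	 * stack pointer, moving sp down as we go.
	 */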
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		char *src = kmap(bprm->page[index]) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		sp -= PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		kunmap(bprm->page[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	*sp_location = sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) EXPORT_SYMBOL(transfer_args_to_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) #endif /* CONFIG_MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
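/*
 * Open a file for execution on behalf of execve()/execveat(): read-only
 * with __FMODE_EXEC and MAY_EXEC so permission checks and LSMs see an
 * exec-style open. Only regular files on mounts that allow execution are
 * accepted, and deny_write_access() keeps writers out (-ETXTBSY) for as
 * long as the exec holds the file. An execveat(fd, "", argv, envp,
 * AT_EMPTY_PATH) call arrives here with an empty name and AT_EMPTY_PATH
 * in @flags, which is translated to LOOKUP_EMPTY below.
 */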
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) static struct file *do_open_execat(int fd, struct filename *name, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct open_flags open_exec_flags = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		.acc_mode = MAY_EXEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		.intent = LOOKUP_OPEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		.lookup_flags = LOOKUP_FOLLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (flags & AT_SYMLINK_NOFOLLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (flags & AT_EMPTY_PATH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	file = do_filp_open(fd, name, &open_exec_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (IS_ERR(file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 * may_open() has already checked for this, so it should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 * impossible to trip now. But we need to be extra cautious
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 * and check again at the very end too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	err = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			 path_noexec(&file->f_path)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	err = deny_write_access(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (name->name[0] != '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		fsnotify_open(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	return file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	fput(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
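/*
 * Kernel-internal helper: resolve @name relative to the current working
 * directory and open it for execution via do_open_execat().
 */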
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) struct file *open_exec(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	struct filename *filename = getname_kernel(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct file *f = ERR_CAST(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (!IS_ERR(filename)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		f = do_open_execat(AT_FDCWD, filename, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		putname(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	return f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) EXPORT_SYMBOL(open_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) #if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)     defined(CONFIG_BINFMT_ELF_FDPIC)
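/*
 * Read part of an executable straight into memory at @addr and keep the
 * instruction cache coherent; used by the loaders selected above when
 * they copy code into place instead of mapping the file.
 */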
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (res > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		flush_icache_user_range(addr, addr + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) EXPORT_SYMBOL(read_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * Install the new mm_struct mm into the current task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * On success, this function returns with exec_update_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * held for writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static int exec_mmap(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct mm_struct *old_mm, *active_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	/* Notify parent that we're no longer interested in the old VM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	old_mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	exec_mm_release(tsk, old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (old_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		sync_mm_rss(old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	ret = down_write_killable(&tsk->signal->exec_update_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	if (old_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		 * Make sure that if there is a core dump in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		 * for the old mm, we get out and die instead of going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		 * through with the exec.  We must hold mmap_lock around
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		 * checking core_state and changing tsk->mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		mmap_read_lock(old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		if (unlikely(old_mm->core_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			mmap_read_unlock(old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			up_write(&tsk->signal->exec_update_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	task_lock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	membarrier_exec_mmap(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	active_mm = tsk->active_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	tsk->active_mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	tsk->mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * This prevents preemption while active_mm is being loaded and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * it and mm are being updated, which could cause problems for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 * lazy tlb mm refcounting when these are updated by context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 * switches. Not all architectures can handle irqs off over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 * activate_mm yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	activate_mm(active_mm, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	tsk->mm->vmacache_seqnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	vmacache_flush(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	task_unlock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (old_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		mmap_read_unlock(old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		BUG_ON(active_mm != old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		mm_update_next_owner(old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		mmput(old_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	mmdrop(active_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int de_thread(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct signal_struct *sig = tsk->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	struct sighand_struct *oldsighand = tsk->sighand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	spinlock_t *lock = &oldsighand->siglock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	if (thread_group_empty(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		goto no_thread_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	 * Kill all other threads in the thread group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	spin_lock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (signal_group_exit(sig)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		 * Another group action in progress, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		 * return so that the signal is processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		spin_unlock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	sig->group_exit_task = tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	sig->notify_count = zap_other_threads(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (!thread_group_leader(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		sig->notify_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
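	/*
	 * Wait for the threads zapped above to go away: each exiting thread
	 * drops ->notify_count and the last one wakes us, so sleep until the
	 * count reaches zero or we are killed.
	 */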
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	while (sig->notify_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		__set_current_state(TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		spin_unlock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		if (__fatal_signal_pending(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			goto killed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		spin_lock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	spin_unlock_irq(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	 * At this point all other threads have exited, all we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	 * do is to wait for the thread group leader to become inactive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * and to assume its PID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	if (!thread_group_leader(tsk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		struct task_struct *leader = tsk->group_leader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			cgroup_threadgroup_change_begin(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			write_lock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			 * Do this under tasklist_lock to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			 * exit_notify() can't miss ->group_exit_task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			sig->notify_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			if (likely(leader->exit_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			__set_current_state(TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			cgroup_threadgroup_change_end(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			if (__fatal_signal_pending(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				goto killed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		 * The only record we have of the real-time age of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		 * process, regardless of how many execs it has done, is start_time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		 * All the past CPU time is accumulated in signal_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		 * from sister threads now dead.  But in this non-leader
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		 * exec, nothing survives from the original leader thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		 * whose birth marks the true age of this process now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 * When we take on its identity by switching to its PID, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		 * also take its birthdate (always earlier than our own).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		tsk->start_time = leader->start_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		tsk->start_boottime = leader->start_boottime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		BUG_ON(!same_thread_group(leader, tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		 * An exec() starts a new thread group with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		 * TGID of the previous thread group. Rehash the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		 * two threads with a switched PID, and release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		 * the former thread group leader:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		/* Become a process group leader with the old leader's pid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		 * The old leader becomes a thread of this thread group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		exchange_tids(tsk, leader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		transfer_pid(leader, tsk, PIDTYPE_TGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		transfer_pid(leader, tsk, PIDTYPE_PGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		transfer_pid(leader, tsk, PIDTYPE_SID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		list_replace_rcu(&leader->tasks, &tsk->tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		list_replace_init(&leader->sibling, &tsk->sibling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		tsk->group_leader = tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		leader->group_leader = tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		tsk->exit_signal = SIGCHLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		leader->exit_signal = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		leader->exit_state = EXIT_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		 * We are going to release_task()->ptrace_unlink() silently,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		 * the tracer won't block again waiting for this thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (unlikely(leader->ptrace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			__wake_up_parent(leader, leader->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		cgroup_threadgroup_change_end(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		release_task(leader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	sig->group_exit_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	sig->notify_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) no_thread_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	/* we have changed execution domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	tsk->exit_signal = SIGCHLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	BUG_ON(!thread_group_leader(tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) killed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	/* protects against exit_notify() and __exit_signal() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	read_lock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	sig->group_exit_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	sig->notify_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * This function makes sure the current process has its own signal table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * so that flush_signal_handlers can later reset the handlers without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * disturbing other processes.  (Other processes might share the signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * table via the CLONE_SIGHAND option to clone().)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static int unshare_sighand(struct task_struct *me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	struct sighand_struct *oldsighand = me->sighand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (refcount_read(&oldsighand->count) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		struct sighand_struct *newsighand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		 * This ->sighand is shared with a CLONE_SIGHAND but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		 * not CLONE_THREAD task; switch to a new private one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (!newsighand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		refcount_set(&newsighand->count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		memcpy(newsighand->action, oldsighand->action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		       sizeof(newsighand->action));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		write_lock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		spin_lock(&oldsighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		rcu_assign_pointer(me->sighand, newsighand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		spin_unlock(&oldsighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		__cleanup_sighand(oldsighand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
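/*
 * Snapshot tsk->comm into @buf under task_lock() so a concurrent
 * __set_task_comm() cannot be observed half-written.
 */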
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	task_lock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	strncpy(buf, tsk->comm, buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	task_unlock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) EXPORT_SYMBOL_GPL(__get_task_comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)  * These functions flush out all traces of the currently running executable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  * so that a new one can be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	task_lock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	trace_task_rename(tsk, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	task_unlock(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	perf_event_comm(tsk, exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)  * Calling this is the point of no return. None of the failures will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)  * seen by userspace since either the process is already taking a fatal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * signal (via de_thread() or coredump), or will have SEGV raised
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * (after exec_mmap()) by search_binary_handler (see below).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) int begin_new_exec(struct linux_binprm * bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	struct task_struct *me = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	/* Once we are committed compute the creds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	retval = bprm_creds_from_file(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	 * Ensure all future errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	bprm->point_of_no_return = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	 * Make this the only thread in the thread group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	retval = de_thread(me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	 * Must be called _before_ exec_mmap() as bprm->mm is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 * not visible until then. This also enables the update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * to be lockless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	set_mm_exe_file(bprm->mm, bprm->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	/* If the binary is not readable then enforce mm->dumpable=0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	would_dump(bprm, bprm->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	if (bprm->have_execfd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		would_dump(bprm, bprm->executable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 * Release all of the old mmap stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	acct_arg_size(bprm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	retval = exec_mmap(bprm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	bprm->mm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) #ifdef CONFIG_POSIX_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	exit_itimers(me->signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	flush_itimer_signals();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	 * Make the signal table private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	retval = unshare_sighand(me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	 * Ensure that the uaccess routines can actually operate on userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 * pointers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	force_uaccess_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 					PF_NOFREEZE | PF_NO_SETAFFINITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	flush_thread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	me->personality &= ~bprm->per_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	 * We have to apply CLOEXEC before we change whether the process is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	 * dumpable (just below) to avoid a race with a process in userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	 * trying to access the should-be-closed file descriptors of a process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	 * undergoing exec(2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	do_close_on_exec(me->files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (bprm->secureexec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		/* Make sure parent cannot signal privileged process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		me->pdeath_signal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 * For secureexec, reset the stack limit to sane default to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		 * avoid bad behavior from the prior rlimits. This has to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		 * happen before arch_pick_mmap_layout(), which examines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		 * RLIMIT_STACK, but after the point of no return to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		 * needing to clean up the change on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		if (bprm->rlim_stack.rlim_cur > _STK_LIM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			bprm->rlim_stack.rlim_cur = _STK_LIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	me->sas_ss_sp = me->sas_ss_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	 * Figure out dumpability. Note that checking only current here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	 * is wrong, but userspace depends on it. This should be testing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	 * bprm->secureexec instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	    !(uid_eq(current_euid(), current_uid()) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	      gid_eq(current_egid(), current_gid())))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		set_dumpable(current->mm, suid_dumpable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		set_dumpable(current->mm, SUID_DUMP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	perf_event_exec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	__set_task_comm(me, kbasename(bprm->filename), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/* An exec changes our domain. We are no longer part of the thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	   group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	flush_signal_handlers(me, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 * install the new credentials for this executable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	security_bprm_committing_creds(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	commit_creds(bprm->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	bprm->cred = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	 * Disable monitoring for regular users
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	 * when executing setuid binaries. Must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	 * wait until new credentials are committed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	 * by commit_creds() above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (get_dumpable(me->mm) != SUID_DUMP_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		perf_event_exit_task(me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	 * cred_guard_mutex must be held at least to this point to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	 * ptrace_attach() from altering our determination of the task's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	 * credentials; any time after this it may be unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	security_bprm_committed_creds(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* Pass the opened binary to the interpreter. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (bprm->have_execfd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		retval = get_unused_fd_flags(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		fd_install(retval, bprm->executable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		bprm->executable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		bprm->execfd = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	up_write(&me->signal->exec_update_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) EXPORT_SYMBOL(begin_new_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
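/*
 * If the user doing the exec cannot read the binary, mark the bprm so the
 * resulting mm will not be user-dumpable, and make sure mm->user_ns is a
 * namespace that is actually privileged over the file's inode.
 */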
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) void would_dump(struct linux_binprm *bprm, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if (inode_permission(inode, MAY_READ) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		struct user_namespace *old, *user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		/* Ensure mm->user_ns contains the executable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		user_ns = old = bprm->mm->user_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		while ((user_ns != &init_user_ns) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		       !privileged_wrt_inode_uidgid(user_ns, inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			user_ns = user_ns->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		if (old != user_ns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			bprm->mm->user_ns = get_user_ns(user_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			put_user_ns(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) EXPORT_SYMBOL(would_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) void setup_new_exec(struct linux_binprm * bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	/* Setup things that can depend upon the personality */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	struct task_struct *me = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	arch_setup_new_exec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	/* Set the new mm task size. We have to do that late because it may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 * depend on TIF_32BIT which is only updated in flush_thread() on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 * some architectures like powerpc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	me->mm->task_size = TASK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	up_write(&me->signal->exec_update_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	mutex_unlock(&me->signal->cred_guard_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) EXPORT_SYMBOL(setup_new_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* Runs immediately before start_thread() takes over. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) void finalize_exec(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	/* Store any stack rlimit changes before starting thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	task_lock(current->group_leader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	task_unlock(current->group_leader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) EXPORT_SYMBOL(finalize_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * Prepare credentials and lock ->cred_guard_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  * setup_new_exec() commits the new creds and drops the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * Or, if exec fails before, free_bprm() should release ->cred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * and unlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static int prepare_bprm_creds(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		return -ERESTARTNOINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	bprm->cred = prepare_exec_creds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	if (likely(bprm->cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	mutex_unlock(&current->signal->cred_guard_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
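/*
 * Tear down a linux_binprm: drop the temporary mm and argument pages,
 * abort unconsumed creds (which also releases cred_guard_mutex), re-allow
 * writes to and drop the opened files, and free any interpreter or fdpath
 * strings.
 */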
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static void free_bprm(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (bprm->mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		acct_arg_size(bprm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		mmput(bprm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	free_arg_pages(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (bprm->cred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		mutex_unlock(&current->signal->cred_guard_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		abort_creds(bprm->cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (bprm->file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		allow_write_access(bprm->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		fput(bprm->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (bprm->executable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		fput(bprm->executable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/* If a binfmt changed the interp, free it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (bprm->interp != bprm->filename)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		kfree(bprm->interp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	kfree(bprm->fdpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	kfree(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
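/*
 * Allocate a linux_binprm and set up its temporary mm and argument stack
 * via bprm_mm_init(). For execveat() with a dirfd and a relative or empty
 * path, the recorded filename becomes "/dev/fd/<fd>" or
 * "/dev/fd/<fd>/<name>", which also serves as the initial interp.
 */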
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	int retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (!bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (fd == AT_FDCWD || filename->name[0] == '/') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		bprm->filename = filename->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		if (filename->name[0] == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 						  fd, filename->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		if (!bprm->fdpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		bprm->filename = bprm->fdpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	bprm->interp = bprm->filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	retval = bprm_mm_init(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return bprm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	free_bprm(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	return ERR_PTR(retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	/* If a binfmt changed the interp, free it first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (bprm->interp != bprm->filename)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		kfree(bprm->interp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	bprm->interp = kstrdup(interp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	if (!bprm->interp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) EXPORT_SYMBOL(bprm_change_interp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  * determine how safe it is to execute the proposed program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)  * - the caller must hold ->cred_guard_mutex to protect against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  *   PTRACE_ATTACH or seccomp thread-sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static void check_unsafe_exec(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	struct task_struct *p = current, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	unsigned n_fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	if (p->ptrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		bprm->unsafe |= LSM_UNSAFE_PTRACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	 * This isn't strictly necessary, but it makes it harder for LSMs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 * mess up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	if (task_no_new_privs(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
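	/*
	 * Count the threads in this group that share p->fs. If the fs_struct
	 * has more users than that, it is also shared with another process
	 * (CLONE_FS without CLONE_THREAD), which the LSMs must treat as
	 * unsafe for a privilege-changing exec; otherwise mark it in_exec
	 * for the duration.
	 */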
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	t = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	n_fs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	spin_lock(&p->fs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	while_each_thread(p, t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		if (t->fs == p->fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 			n_fs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	if (p->fs->users > n_fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		bprm->unsafe |= LSM_UNSAFE_SHARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		p->fs->in_exec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	spin_unlock(&p->fs->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	/* Handle suid and sgid on files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	unsigned int mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	kuid_t uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	kgid_t gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if (!mnt_may_suid(file->f_path.mnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	if (task_no_new_privs(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	inode = file->f_path.dentry->d_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	mode = READ_ONCE(inode->i_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (!(mode & (S_ISUID|S_ISGID)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	/* Be careful if suid/sgid is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	/* reload mode/uid/gid atomically now that the lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	mode = inode->i_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	uid = inode->i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	gid = inode->i_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	/* We ignore suid/sgid if there are no mappings for them in the ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		 !kgid_has_mapping(bprm->cred->user_ns, gid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	if (mode & S_ISUID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		bprm->per_clear |= PER_CLEAR_ON_SETID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		bprm->cred->euid = uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		bprm->per_clear |= PER_CLEAR_ON_SETID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		bprm->cred->egid = gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
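
/*
 * Illustrative userspace sketch (not part of exec.c): once a task has set
 * PR_SET_NO_NEW_PRIVS, bprm_fill_uid() above returns early, so the set-uid/
 * set-gid bits of whatever it execs are ignored.  The helper path below is a
 * placeholder for any set-uid binary on the system; nothing here is a kernel
 * API claim beyond the prctl(2) and execve(2) calls shown.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

int main(void)
{
	/* After this, no execve() from this task may grant new privileges. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_NO_NEW_PRIVS)");
		return 1;
	}

	/* Placeholder set-uid binary: with no_new_privs it runs with the
	 * caller's euid/egid instead of the file owner's. */
	char *const argv[] = { "suid-helper", NULL };
	char *const envp[] = { NULL };
	execve("/usr/local/bin/suid-helper", argv, envp);
	perror("execve");	/* reached only if the exec itself failed */
	return 1;
}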
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)  * Compute bprm->cred based upon the final binary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static int bprm_creds_from_file(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	/* Compute creds based on which file? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	bprm_fill_uid(bprm, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	return security_bprm_creds_from_file(bprm, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * Fill the binprm structure from the executable being run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  * Read the first BINPRM_BUF_SIZE bytes into bprm->buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)  * This may be called multiple times for binary chains (scripts for example).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static int prepare_binprm(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	loff_t pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
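
/*
 * Illustrative userspace sketch (not part of exec.c): the only data the
 * binfmt handlers get to inspect up front is the first BINPRM_BUF_SIZE
 * bytes that prepare_binprm() reads into bprm->buf.  This reads the same
 * window from an arbitrary file and checks the two classic signatures.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/binfmts.h>	/* BINPRM_BUF_SIZE */

int main(int argc, char **argv)
{
	unsigned char buf[BINPRM_BUF_SIZE] = { 0 };
	ssize_t n;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror(argv[1]);
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}

	if (n >= 4 && memcmp(buf, "\177ELF", 4) == 0)
		puts("ELF magic: binfmt_elf would claim this");
	else if (n >= 2 && buf[0] == '#' && buf[1] == '!')
		puts("shebang: binfmt_script would claim this");
	else
		puts("no well-known signature in the first BINPRM_BUF_SIZE bytes");
	return 0;
}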
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)  * Arguments are '\0' separated strings found at the location bprm->p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)  * points to; chop off the first by relocating bprm->p to right after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)  * the first '\0' encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) int remove_arg_zero(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	if (!bprm->argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		offset = bprm->p & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		page = get_arg_page(bprm, bprm->p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		for (; offset < PAGE_SIZE && kaddr[offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 				offset++, bprm->p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		put_arg_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	} while (offset == PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	bprm->p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	bprm->argc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) EXPORT_SYMBOL(remove_arg_zero);
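
/*
 * Illustrative sketch (not part of exec.c), assuming an out-of-tree module:
 * remove_arg_zero() is exported for binfmt handlers such as binfmt_script,
 * which drop the caller's argv[0] before pushing the interpreter's own
 * arguments.  The "toyfmt" name below is made up for the example and the
 * handler simply declines every image.
 */
#include <linux/module.h>
#include <linux/binfmts.h>

static int toyfmt_load_binary(struct linux_binprm *bprm)
{
	/* A real handler would check bprm->buf for its magic, call
	 * remove_arg_zero(bprm), push its own strings with
	 * copy_string_kernel(), and hand the interpreter back through
	 * bprm->interpreter.  Returning -ENOEXEC lets
	 * search_binary_handler() move on to the next format. */
	return -ENOEXEC;
}

static struct linux_binfmt toyfmt_format = {
	.module		= THIS_MODULE,
	.load_binary	= toyfmt_load_binary,
};

static int __init toyfmt_init(void)
{
	register_binfmt(&toyfmt_format);
	return 0;
}

static void __exit toyfmt_exit(void)
{
	unregister_binfmt(&toyfmt_format);
}

module_init(toyfmt_init);
module_exit(toyfmt_exit);
MODULE_LICENSE("GPL");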
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  * cycle through the list of binary format handlers until one recognizes the image
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) static int search_binary_handler(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	bool need_retry = IS_ENABLED(CONFIG_MODULES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	struct linux_binfmt *fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	retval = prepare_binprm(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	retval = security_bprm_check(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	retval = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)  retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	read_lock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	list_for_each_entry(fmt, &formats, lh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		if (!try_module_get(fmt->module))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		read_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		retval = fmt->load_binary(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		read_lock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		put_binfmt(fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			read_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 			return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	read_unlock(&binfmt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	if (need_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		need_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
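
/*
 * Illustrative userspace sketch (not part of exec.c): when no handler
 * claims the image and the first four bytes are not all printable,
 * search_binary_handler() asks modprobe for a module named "binfmt-%04x",
 * formed from the 16-bit value at offset 2 of the header.  This prints
 * the alias the kernel would request for a given file.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned char buf[4] = { 0 };
	uint16_t code;
	int fd;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("read");
		return 1;
	}
	close(fd);

	/* Native-endian load, matching *(ushort *)(bprm->buf + 2) above. */
	memcpy(&code, buf + 2, sizeof(code));
	printf("request_module(\"binfmt-%04x\")\n", code);
	return 0;
}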
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int exec_binprm(struct linux_binprm *bprm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	pid_t old_pid, old_vpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	int ret, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	/* Need to fetch pid before load_binary changes it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	old_pid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	/* Allow a limited number of binfmt rewrites (capped by the depth check below) before failing hard with -ELOOP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	for (depth = 0;; depth++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		struct file *exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		if (depth > 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			return -ELOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		ret = search_binary_handler(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		if (!bprm->interpreter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		exec = bprm->file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		bprm->file = bprm->interpreter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		bprm->interpreter = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		allow_write_access(exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		if (unlikely(bprm->have_execfd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			if (bprm->executable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 				fput(exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 				return -ENOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			bprm->executable = exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			fput(exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	audit_bprm(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	trace_sched_process_exec(current, old_pid, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	proc_exec_connector(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
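
/*
 * Illustrative userspace sketch (not part of exec.c): the
 * ptrace_event(PTRACE_EVENT_EXEC, ...) call above is what a tracer that
 * enabled PTRACE_O_TRACEEXEC observes as an exec stop.  Minimal
 * parent/child pair; "/bin/true" is just a convenient exec target.
 */
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent set options */
		execl("/bin/true", "true", (char *)NULL);
		_exit(127);
	}

	waitpid(pid, &status, 0);		/* initial SIGSTOP */
	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACEEXEC);
	ptrace(PTRACE_CONT, pid, NULL, NULL);

	waitpid(pid, &status, 0);		/* should be the exec stop */
	if (WIFSTOPPED(status) &&
	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
		printf("observed PTRACE_EVENT_EXEC from pid %d\n", pid);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, &status, 0);
	return 0;
}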
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  * sys_execve() and friends funnel into this to execute a new program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) static int bprm_execve(struct linux_binprm *bprm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		       int fd, struct filename *filename, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	struct files_struct *displaced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	 * Cancel any io_uring activity across execve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	io_uring_task_cancel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	retval = unshare_files(&displaced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	retval = prepare_bprm_creds(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		goto out_files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	check_unsafe_exec(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	current->in_execve = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	file = do_open_execat(fd, filename, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	retval = PTR_ERR(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (IS_ERR(file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		goto out_unmark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	sched_exec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	bprm->file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	 * Record that a name derived from an O_CLOEXEC fd will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	 * inaccessible after exec. Relies on having exclusive access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	 * current->files (due to unshare_files above).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	if (bprm->fdpath &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	    close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	/* Set the unchanging part of bprm->cred */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	retval = security_bprm_creds_for_exec(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	retval = exec_binprm(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	/* execve succeeded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	current->fs->in_exec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	current->in_execve = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	rseq_execve(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	acct_update_integrals(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	task_numa_free(current, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (displaced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		put_files_struct(displaced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	 * If past the point of no return, ensure the code never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	 * returns to the userspace process.  Use an existing fatal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	 * signal if present otherwise terminate the process with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	 * SIGSEGV.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (bprm->point_of_no_return && !fatal_signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		force_sigsegv(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) out_unmark:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	current->fs->in_exec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	current->in_execve = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) out_files:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	if (displaced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		reset_files_struct(displaced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) static int do_execveat_common(int fd, struct filename *filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			      struct user_arg_ptr argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			      struct user_arg_ptr envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			      int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	struct linux_binprm *bprm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (IS_ERR(filename))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		return PTR_ERR(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 * We move the actual failure in case of RLIMIT_NPROC excess from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * set*uid() to execve() because too many poorly written programs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 * don't check the setuid() return code.  Here we additionally recheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 * whether the NPROC limit is still exceeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if ((current->flags & PF_NPROC_EXCEEDED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		retval = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		goto out_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	/* We're below the limit (still or again), so we don't want to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	 * further execve() calls fail. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	current->flags &= ~PF_NPROC_EXCEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	bprm = alloc_bprm(fd, filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	if (IS_ERR(bprm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		retval = PTR_ERR(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		goto out_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	retval = count(argv, MAX_ARG_STRINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (retval == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			     current->comm, bprm->filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	bprm->argc = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	retval = count(envp, MAX_ARG_STRINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	bprm->envc = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	retval = bprm_stack_limits(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	retval = copy_string_kernel(bprm->filename, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	bprm->exec = bprm->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	retval = copy_strings(bprm->envc, envp, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	retval = copy_strings(bprm->argc, argv, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	 * When argv is empty, add an empty string ("") as argv[0] to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	 * ensure confused userspace programs that start processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	 * from argv[1] won't end up walking envp. See also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	 * bprm_stack_limits().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (bprm->argc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		retval = copy_string_kernel("", bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		bprm->argc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	retval = bprm_execve(bprm, fd, filename, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	free_bprm(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) out_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	putname(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
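
/*
 * Illustrative userspace sketch (not part of exec.c): passing an empty
 * argv is exactly what trips the pr_warn_once() above; the kernel then
 * inserts "" as argv[0] so the new program still sees argc >= 1.
 * "/bin/true" is just a harmless target.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char *const empty_argv[] = { NULL };
	char *const empty_envp[] = { NULL };

	/* On this kernel the exec succeeds, the new program gets "" as
	 * argv[0], and a one-time "launched ... with NULL argv" warning
	 * is logged. */
	execve("/bin/true", empty_argv, empty_envp);
	perror("execve");
	return 1;
}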
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) int kernel_execve(const char *kernel_filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		  const char *const *argv, const char *const *envp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	struct filename *filename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	struct linux_binprm *bprm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	int fd = AT_FDCWD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	filename = getname_kernel(kernel_filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	if (IS_ERR(filename))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		return PTR_ERR(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	bprm = alloc_bprm(fd, filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (IS_ERR(bprm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		retval = PTR_ERR(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		goto out_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	retval = count_strings_kernel(argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	if (WARN_ON_ONCE(retval == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	bprm->argc = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	retval = count_strings_kernel(envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	bprm->envc = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	retval = bprm_stack_limits(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	retval = copy_string_kernel(bprm->filename, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	bprm->exec = bprm->p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	retval = copy_strings_kernel(bprm->envc, envp, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	retval = copy_strings_kernel(bprm->argc, argv, bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	if (retval < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	retval = bprm_execve(bprm, fd, filename, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	free_bprm(bprm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) out_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	putname(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) static int do_execve(struct filename *filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	const char __user *const __user *__argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	const char __user *const __user *__envp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	struct user_arg_ptr argv = { .ptr.native = __argv };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	struct user_arg_ptr envp = { .ptr.native = __envp };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static int do_execveat(int fd, struct filename *filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		const char __user *const __user *__argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		const char __user *const __user *__envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	struct user_arg_ptr argv = { .ptr.native = __argv };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	struct user_arg_ptr envp = { .ptr.native = __envp };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	return do_execveat_common(fd, filename, argv, envp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static int compat_do_execve(struct filename *filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	const compat_uptr_t __user *__argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	const compat_uptr_t __user *__envp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	struct user_arg_ptr argv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		.is_compat = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		.ptr.compat = __argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	struct user_arg_ptr envp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		.is_compat = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		.ptr.compat = __envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static int compat_do_execveat(int fd, struct filename *filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			      const compat_uptr_t __user *__argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			      const compat_uptr_t __user *__envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 			      int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	struct user_arg_ptr argv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		.is_compat = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		.ptr.compat = __argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	struct user_arg_ptr envp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		.is_compat = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		.ptr.compat = __envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	return do_execveat_common(fd, filename, argv, envp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) void set_binfmt(struct linux_binfmt *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	if (mm->binfmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		module_put(mm->binfmt->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	mm->binfmt = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		__module_get(new->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) EXPORT_SYMBOL(set_binfmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)  * set_dumpable stores the three-value SUID_DUMP_* setting into mm->flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) void set_dumpable(struct mm_struct *mm, int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
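
/*
 * Illustrative userspace sketch (not part of exec.c): the SUID_DUMP_*
 * value that set_dumpable() stores is the same one userspace reads and
 * writes via prctl(PR_GET_DUMPABLE / PR_SET_DUMPABLE); the exec code in
 * this file resets it when the new image would change credentials.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	printf("dumpable before: %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));

	/* 0 == SUID_DUMP_DISABLE: no core dumps, /proc/<pid> becomes
	 * root-owned, and unprivileged ptrace attach is refused. */
	prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);

	printf("dumpable after:  %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));
	return 0;
}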
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) SYSCALL_DEFINE3(execve,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		const char __user *, filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		const char __user *const __user *, argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		const char __user *const __user *, envp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	return do_execve(getname(filename), argv, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) SYSCALL_DEFINE5(execveat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		int, fd, const char __user *, filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		const char __user *const __user *, argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		const char __user *const __user *, envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		int, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	return do_execveat(fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 			   getname_flags(filename, lookup_flags, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			   argv, envp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
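
/*
 * Illustrative userspace sketch (not part of exec.c): the AT_EMPTY_PATH
 * handling above (LOOKUP_EMPTY plus an empty pathname) is what lets
 * userspace exec an already-open file descriptor, fexecve(3)-style.
 * "/bin/true" is just a convenient target.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char *const argv[] = { "true", NULL };
	char *const envp[] = { NULL };
	int fd = open("/bin/true", O_PATH);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Empty pathname + AT_EMPTY_PATH: exec the object the fd refers to. */
	syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	perror("execveat");
	return 1;
}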
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	const compat_uptr_t __user *, argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	const compat_uptr_t __user *, envp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	return compat_do_execve(getname(filename), argv, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		       const char __user *, filename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		       const compat_uptr_t __user *, argv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		       const compat_uptr_t __user *, envp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		       int,  flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	return compat_do_execveat(fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 				  getname_flags(filename, lookup_flags, NULL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 				  argv, envp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) #endif