Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * umh - the kernel usermode helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/binfmts.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/kmod.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/cred.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/fdtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/fs_struct.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/security.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/resource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/suspend.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/async.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <trace/events/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #define CAP_BSET	(void *)1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define CAP_PI		(void *)2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) static DEFINE_SPINLOCK(umh_sysctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) static DECLARE_RWSEM(umhelper_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) static void call_usermodehelper_freeinfo(struct subprocess_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	if (info->cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 		(*info->cleanup)(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	kfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/*
 * Signal (or clean up after) the call_usermodehelper_exec() caller.
 * Atomically takes ownership of ->complete so that exactly one side —
 * this function or the aborting caller — acts on sub_info.
 */
static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * This is the task which runs the usermode application
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  */
/*
 * This is the task which runs the usermode application: reset inherited
 * kernel-thread state (signals, umask, nice), build fresh credentials
 * restricted by the umh sysctl capability masks, run the optional init
 * hook, then execve the helper binary.  On success the execve does not
 * return here; on any failure the error is stored in ->retval and the
 * task exits.
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	/* Drop any signal handlers inherited from the spawning context. */
	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Initial kernel threads share their FS with init, in order to
	 * get the init root directory. But we've now created a new
	 * thread that is going to execve a user process and has its own
	 * 'struct fs_struct'. Reset umask to the default.
	 */
	current->fs->umask = 0022;

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	/* Clamp the new creds by the umh_sysctl-controlled capability masks. */
	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	/* Caller-supplied customization hook; runs before creds are committed. */
	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	/* On success this does not return — the task becomes the helper. */
	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* Handles UMH_WAIT_PROC.  */
/*
 * Handles UMH_WAIT_PROC: spawn the helper thread and block until the
 * helper process exits, collecting its exit status into ->retval.
 * Runs in workqueue context, so blocking here is permitted.
 */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored do_wait won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0)
		sub_info->retval = pid;
	else
		kernel_wait(pid, &sub_info->retval);

	/* Restore default kernel sig handler (ignore, so children auto-reap). */
	kernel_sigaction(SIGCHLD, SIG_IGN);
	/* Wake the call_usermodehelper_exec() waiter (or free sub_info). */
	umh_complete(sub_info);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)  * We need to create the usermodehelper kernel thread from a task that is affine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)  * to an optimized set of CPUs (or nohz housekeeping ones) such that they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  * inherit a widest affinity irrespective of call_usermodehelper() callers with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)  * possibly reduced affinity (eg: per-cpu workqueues). We don't want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)  * usermodehelper targets to contend a busy CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)  * Unbound workqueues provide such wide affinity and allow to block on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)  * UMH_WAIT_PROC requests without blocking pending request (up to some limit).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  * Besides, workqueues provide the privilege level that caller might not have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  * to perform the usermodehelper request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)  */
/*
 * Workqueue callback: dispatch the helper request queued by
 * call_usermodehelper_exec().  UMH_WAIT_PROC requests are run
 * synchronously (we can block here); all others just spawn the helper
 * thread and return.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		/* On spawn failure, report the error and release sub_info. */
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)  * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)  * (used for preventing user land processes from being created after the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)  * land has been frozen during a system-wide hibernation or suspend operation).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  * Should always be manipulated under umhelper_sem acquired for write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) /* Number of helpers running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) static atomic_t running_helpers = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)  * Wait queue head used by usermodehelper_disable() to wait for all running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)  * helpers to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  * to become 'false'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  * Time to wait for running_helpers to become zero before the setting of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  * usermodehelper_disabled in usermodehelper_disable() fails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
/*
 * Take umhelper_sem for reading, interruptibly waiting while helpers are
 * disabled with a non-UMH_DISABLED depth (e.g. frozen for suspend).
 * Returns 0 with the read lock held, or -EAGAIN (lock dropped) when
 * helpers are hard-disabled (UMH_DISABLED).
 */
int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		/* Enabled: leave the loop with the read lock still held. */
		if (!usermodehelper_disabled)
			break;

		/* Hard-disabled: give up rather than wait. */
		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		/* Must drop the lock before sleeping (or before erroring out). */
		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		/* Re-take the lock and re-check the disabled state. */
		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
/*
 * Like usermodehelper_read_trylock(), but waits uninterruptibly up to
 * @timeout jiffies for usermodehelper_disabled to clear.  Returns the
 * remaining timeout with umhelper_sem held for reading, or 0 if the
 * timeout elapsed (lock NOT held), or -EINVAL for a negative @timeout.
 */
long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		/* Enabled: return with the read lock still held. */
		if (!usermodehelper_disabled)
			break;

		/* Drop the lock before sleeping. */
		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		/* Timed out: return 0 without re-taking the lock. */
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
/*
 * Drop the read lock taken by usermodehelper_read_trylock() or a
 * successful usermodehelper_read_lock_wait().
 */
void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  * @depth: New value to assign to usermodehelper_disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  * Change the value of usermodehelper_disabled (under umhelper_sem locked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  * writing) and wakeup tasks waiting for it to change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	/* Wake usermodehelper_read_trylock()/_lock_wait() sleepers. */
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  * __usermodehelper_disable - Prevent new helpers from being started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  * @depth: New value to assign to usermodehelper_disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)  * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)  */
/*
 * Returns 0 once all running helpers have finished, or -EAGAIN (with
 * helpers re-enabled) if they did not drain within
 * RUNNING_HELPERS_TIMEOUT.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	/* UMH_ENABLED (0) is not a valid disable depth. */
	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	/* Helpers didn't drain in time: undo the disable and report failure. */
	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
/*
 * Account one running helper.  The full barrier orders the increment
 * before the caller's subsequent read of usermodehelper_disabled in
 * call_usermodehelper_exec(), so __usermodehelper_disable() cannot miss
 * this helper after observing running_helpers == 0.
 */
static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
/*
 * Drop one running-helper reference; wake __usermodehelper_disable()
 * when the last helper finishes.
 */
static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)  * call_usermodehelper_setup - prepare to call a usermode helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)  * @path: path to usermode executable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)  * @argv: arg vector for process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)  * @envp: environment for process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)  * @gfp_mask: gfp mask for memory allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)  * @cleanup: a cleanup function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)  * @init: an init function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)  * @data: arbitrary context sensitive data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)  * Returns either %NULL on allocation failure, or a subprocess_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)  * structure.  This should be passed to call_usermodehelper_exec to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)  * exec the process and free the structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)  * The init function is used to customize the helper process prior to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)  * exec.  A non-zero return code causes the process to error out, exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)  * and return the failure to the calling process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)  *
 * The cleanup function is called just before the subprocess_info is about to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)  * be freed.  This can be used for freeing the argv and envp.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)  * Function must be runnable in either a process context or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)  * context in which call_usermodehelper_exec is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)  */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	/* Zeroed so unset fields (retval, complete, ...) start as 0/NULL. */
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	/* All helpers are forced through one configured binary; @path is ignored. */
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) EXPORT_SYMBOL(call_usermodehelper_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)  * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)  * @wait: wait for the application to finish and return status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)  *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)  *        when the program couldn't be exec'ed. This makes it safe to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)  *        from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)  * Runs a user-space application.  The application is started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)  * asynchronously if wait is not set, and runs as a child of system workqueues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)  * (ie. it runs with full root capabilities and optimized affinity).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)  * Note: successful return value does not guarantee the helper was called at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)  * all. You can't rely on sub_info->{init,cleanup} being called even for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)  * UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)  * into a successful no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)  */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	/* Register as a running helper before checking the disabled flag. */
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here.  This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		/* Wait, but bail out if this task receives a fatal signal. */
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) EXPORT_SYMBOL(call_usermodehelper_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  * call_usermodehelper() - prepare and start a usermode application
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)  * @path: path to usermode executable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)  * @argv: arg vector for process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)  * @envp: environment for process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)  * @wait: wait for the application to finish and return status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)  *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)  *        when the program couldn't be exec'ed. This makes it safe to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  *        from interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)  * This function is the equivalent to use call_usermodehelper_setup() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)  * call_usermodehelper_exec().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	struct subprocess_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 					 NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	if (info == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	return call_usermodehelper_exec(info, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) EXPORT_SYMBOL(call_usermodehelper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static int proc_cap_handler(struct ctl_table *table, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 			 void *buffer, size_t *lenp, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	struct ctl_table t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	kernel_cap_t new_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	if (write && (!capable(CAP_SETPCAP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		      !capable(CAP_SYS_MODULE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	 * convert from the global kernel_cap_t to the ulong array to print to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	 * userspace if this is a read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	spin_lock(&umh_sysctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		if (table->data == CAP_BSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 			cap_array[i] = usermodehelper_bset.cap[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 		else if (table->data == CAP_PI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 			cap_array[i] = usermodehelper_inheritable.cap[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	spin_unlock(&umh_sysctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	t = *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	t.data = &cap_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	 * actually read or write and array of ulongs from userspace.  Remember
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	 * these are least significant 32 bits first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	 * convert from the sysctl array of ulongs to the kernel_cap_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	 * internal representation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		new_cap.cap[i] = cap_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	 * Drop everything not in the new_cap (but don't add things)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 		spin_lock(&umh_sysctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 		if (table->data == CAP_BSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		if (table->data == CAP_PI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		spin_unlock(&umh_sysctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct ctl_table usermodehelper_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 		.procname	= "bset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		.data		= CAP_BSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		.mode		= 0600,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		.proc_handler	= proc_cap_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		.procname	= "inheritable",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		.data		= CAP_PI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		.mode		= 0600,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 		.proc_handler	= proc_cap_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) };