Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

kernel/exit.c (git blame: all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>
#include <trace/hooks/mm.h>

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

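/*
 * Editor's note (sketch, not part of the original file): the stats_lock
 * write section above pairs with seqlock readers such as
 * thread_group_cputime(), which retry until they observe a consistent
 * snapshot. A minimal read side, assuming a valid sig pointer:
 *
 *	unsigned int seq;
 *	u64 utime;
 *
 *	do {
 *		seq = read_seqbegin(&sig->stats_lock);
 *		utime = sig->utime;
 *	} while (read_seqretry(&sig->stats_lock, seq));
 */
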
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

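/*
 * Editor's note (sketch, not part of the original file): rcu_users keeps
 * a task_struct reachable by RCU readers for one grace period after the
 * final put_task_struct_rcu_user(), since the last put of the underlying
 * reference is deferred through call_rcu() above. A lookup pattern that
 * depends on this, with pid a hypothetical variable:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(pid);
 *	if (p)
 *		get_task_struct(p);	(pin it beyond the RCU section)
 *	rcu_read_unlock();
 */
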
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

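/*
 * Editor's note (not part of the original file): the repeat loop above
 * covers delayed reaping of a thread-group leader. Example: the leader
 * exits first and lingers as EXIT_ZOMBIE while its other threads run;
 * when the last non-leader thread is released and do_notify_parent()
 * reports that the parent does not want the notification, zap_leader is
 * set and a second pass releases the leader itself.
 */
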
int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *    [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

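/*
 * Editor's note (sketch, not part of the original file): the wait side
 * that pairs with rcuwait_wake_up() above. In v5.10 the
 * rcuwait_wait_event() macro takes the wait object, the condition and a
 * task state, and supplies barrier (A) via set_current_state(). Names
 * below are hypothetical:
 *
 *	static struct rcuwait example_wait;
 *	static bool example_cond;
 *
 *	waiter:
 *		rcuwait_wait_event(&example_wait, READ_ONCE(example_cond),
 *				   TASK_UNINTERRUPTIBLE);
 *
 *	waker:
 *		WRITE_ONCE(example_cond, true);		([S] cond = true)
 *		rcuwait_wake_up(&example_wait);		(MB (B), then [L] tsk)
 */
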
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

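/*
 * Editor's note (not part of the original file): a concrete job-control
 * case for the POSIX rule above. If a shell in another process group
 * exits while one of its pipelines is stopped (e.g. suspended with
 * Ctrl-Z), that pipeline's group loses its last link outside itself and
 * becomes orphaned; kill_orphaned_pgrp() below then sends SIGHUP followed
 * by SIGCONT so the stopped jobs are not left permanently unresumable.
 */
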
static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

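/*
 * Editor's note (not part of the original file): mm->owner exists for the
 * memory controller, which uses it to map an mm back to a task and hence
 * to a memcg (see get_mem_cgroup_from_mm() in mm/memcontrol.c); that is
 * why this walk is only compiled under CONFIG_MEMCG. Readers must
 * therefore tolerate the NULL owner that this function installs when no
 * live user of the mm is found.
 */
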
/*
 * Turn us into a lazy TLB process if we
 * aren't already.
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_lock around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	mmap_read_lock(mm);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		mmap_read_unlock(mm);

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		mmap_read_lock(mm);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	current->mm = NULL;
	mmap_read_unlock(mm);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	trace_android_vh_exit_mm(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

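/*
 * Editor's note (not part of the original file): the core_state block
 * above is the exiting thread's half of the coredump handshake. The
 * dumping thread publishes mm->core_state and waits on
 * core_state->startup until every thread sharing the mm has checked in;
 * each exiting thread decrements nr_threads, parks in
 * TASK_UNINTERRUPTIBLE, and is released once coredump_finish() clears
 * self.task.
 */
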
static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

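/*
 * Editor's note (sketch, not part of the original file): case 2 above is
 * opted into from userspace. A service manager marks itself a child
 * subreaper, and orphaned descendants are then reparented to it instead
 * of to init:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0)
 *		perror("prctl");
 */
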
/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us.
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

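/*
 * Editor's note (sketch, not part of the original file): the autoreap
 * path above is what a parent triggers by ignoring SIGCHLD or setting
 * SA_NOCLDWAIT; do_notify_parent() then returns true and the child is
 * released without ever becoming waitable. Userspace side:
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { .sa_handler = SIG_IGN };
 *
 *	sigaction(SIGCHLD, &sa, NULL);	(children no longer become zombies)
 */
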
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

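/*
 * Editor's note (not part of the original file): with
 * CONFIG_DEBUG_STACK_USAGE enabled, the pr_info() above emits dmesg lines
 * of the form (values illustrative):
 *
 *	bash (1234) used greatest stack depth: 2384 bytes left
 */
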
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) void __noreturn do_exit(long code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	int group_dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	 * We can get here from a kernel oops, sometimes with preemption off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	 * Start by checking for critical errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	 * Then fix up important state like USER_DS and preemption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	 * Then do everything else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	WARN_ON(blk_needs_flush_plug(tsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	if (unlikely(in_interrupt()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		panic("Aiee, killing interrupt handler!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	if (unlikely(!tsk->pid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		panic("Attempted to kill the idle task!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	 * If do_exit is called because this processes oopsed, it's possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	 * continuing. Amongst other possible reasons, this is to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * mm_release()->clear_child_tid() from writing to a user-controlled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 * kernel address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	force_uaccess_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	if (unlikely(in_atomic())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		pr_info("note: %s[%d] exited with preempt_count %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			current->comm, task_pid_nr(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			preempt_count());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		preempt_count_set(PREEMPT_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	profile_task_exit(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	kcov_task_exit(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	ptrace_event(PTRACE_EVENT_EXIT, code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	validate_creds_for_do_exit(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	 * We're taking recursive faults here in do_exit. Safest is to just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	 * leave this task alone and wait for reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	if (unlikely(tsk->flags & PF_EXITING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		pr_alert("Fixing recursive fault but reboot is needed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		futex_exit_recursive(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	io_uring_files_cancel(tsk->files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	exit_signals(tsk);  /* sets PF_EXITING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/* sync mm's RSS info before statistics gathering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (tsk->mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		sync_mm_rss(tsk->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	acct_update_integrals(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	group_dead = atomic_dec_and_test(&tsk->signal->live);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (group_dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		 * If the last thread of global init has exited, panic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		 * immediately to get a useable coredump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		if (unlikely(is_global_init(tsk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			panic("Attempted to kill init! exitcode=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 				tsk->signal->group_exit_code ?: (int)code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) #ifdef CONFIG_POSIX_TIMERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		hrtimer_cancel(&tsk->signal->real_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		exit_itimers(tsk->signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		if (tsk->mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	acct_collect(code, group_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (group_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		tty_audit_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	audit_free(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	tsk->exit_code = code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	taskstats_exit(tsk, group_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	exit_mm();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	if (group_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		acct_process();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	trace_sched_process_exit(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	exit_sem(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	exit_shm(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	exit_files(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	exit_fs(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (group_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		disassociate_ctty(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	exit_task_namespaces(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	exit_task_work(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	exit_thread(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	 * Flush inherited counters to the parent - before the parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	 * gets woken up by child-exit notifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * Because of cgroup mode, this must be called before cgroup_exit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	perf_event_exit_task(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	sched_autogroup_exit_task(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	cgroup_exit(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 * FIXME: do that only when needed, using sched_exit tracepoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	flush_ptrace_hw_breakpoint(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	exit_tasks_rcu_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	exit_notify(tsk, group_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	proc_exit_connector(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	mpol_put_task_policy(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) #ifdef CONFIG_FUTEX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (unlikely(current->pi_state_cache))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		kfree(current->pi_state_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	 * Make sure we are holding no locks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	debug_check_no_locks_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (tsk->io_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		exit_io_context(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (tsk->splice_pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		free_pipe_info(tsk->splice_pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (tsk->task_frag.page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		put_page(tsk->task_frag.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	validate_creds_for_do_exit(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	check_stack_usage();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (tsk->nr_dirtied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	exit_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	exit_tasks_rcu_finish();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	lockdep_free_task(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	do_task_dead();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) EXPORT_SYMBOL_GPL(do_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) void complete_and_exit(struct completion *comp, long code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		complete(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	do_exit(code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) EXPORT_SYMBOL(complete_and_exit);
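
/*
 * Illustrative sketch (not part of this file): the usual caller of
 * complete_and_exit() is a kernel thread that must not outlive its
 * module, so the module's exit path waits on a completion that the
 * thread fires as its very last action.  The names thread_done and
 * my_thread_fn below are hypothetical:
 *
 *	static DECLARE_COMPLETION(thread_done);
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		... do the thread's work ...
 *		complete_and_exit(&thread_done, 0);	(never returns)
 *	}
 *
 * and in the module exit path:
 *
 *	wait_for_completion(&thread_done);
 */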
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) SYSCALL_DEFINE1(exit, int, error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	do_exit((error_code&0xff)<<8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) }
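
/*
 * The (error_code & 0xff) << 8 above builds the classic wait(2) status
 * word: low 7 bits zero means "exited normally" and the caller's code
 * sits in bits 8-15.  A minimal user-space sketch of the round trip,
 * using the standard <sys/wait.h> macros:
 *
 *	int status;
 *	if (fork() == 0)
 *		_exit(3);		raw status becomes 0x0300
 *	wait(&status);
 *	WIFEXITED(status)	-> true, since (status & 0x7f) == 0
 *	WEXITSTATUS(status)	-> 3, i.e. (status >> 8) & 0xff
 */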
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  * Take down every thread in the group.  This is called by fatal signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  * as well as by sys_exit_group (below).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) do_group_exit(int exit_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct signal_struct *sig = current->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	BUG_ON(exit_code & 0x80); /* core dumps don't get here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if (signal_group_exit(sig))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		exit_code = sig->group_exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	else if (!thread_group_empty(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		struct sighand_struct *const sighand = current->sighand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		spin_lock_irq(&sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		if (signal_group_exit(sig))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			/* Another thread got here before we took the lock.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			exit_code = sig->group_exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			sig->group_exit_code = exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			sig->flags = SIGNAL_GROUP_EXIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			zap_other_threads(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		spin_unlock_irq(&sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	do_exit(exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	/* NOTREACHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  * This kills every thread in the thread group. Note that any externally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  * wait4()-ing process will get the correct exit code - even if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  * thread is not the thread group leader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) SYSCALL_DEFINE1(exit_group, int, error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	do_group_exit((error_code & 0xff) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/* NOTREACHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) struct waitid_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	pid_t pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	uid_t uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	int cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) struct wait_opts {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	enum pid_type		wo_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	int			wo_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct pid		*wo_pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct waitid_info	*wo_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	int			wo_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	struct rusage		*wo_rusage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	wait_queue_entry_t		child_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	int			notask_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	return	wo->wo_type == PIDTYPE_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		task_pid_type(p, wo->wo_type) == wo->wo_pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (!eligible_pid(wo, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * Wait for all children (clone and not) if __WALL is set or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 * if the child is traced by us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (ptrace || (wo->wo_flags & __WALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	 * Otherwise, wait for clone children *only* if __WCLONE is set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	 * otherwise, wait for non-clone children *only*.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * Note: a "clone" child here is one that reports to its parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * using a signal other than SIGCHLD, or a non-leader thread which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * we can only see if it is traced by us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
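
/*
 * The XOR test above compacts a four-way choice.  Spelled out, with
 * "clone" meaning p->exit_signal != SIGCHLD:
 *
 *	child		__WCLONE set?	result
 *	non-clone	no		eligible	(0 ^ 0 == 0)
 *	non-clone	yes		skipped		(0 ^ 1 == 1)
 *	clone		no		skipped		(1 ^ 0 == 1)
 *	clone		yes		eligible	(1 ^ 1 == 0)
 */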
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * the lock and this task is uninteresting.  If we return nonzero, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * released the lock and the system call should return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	int state, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	pid_t pid = task_pid_vnr(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct waitid_info *infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	if (!likely(wo->wo_flags & WEXITED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (unlikely(wo->wo_flags & WNOWAIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		status = p->exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		get_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		sched_annotate_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		if (wo->wo_rusage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		goto out_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * Move the task's state to DEAD/TRACE; only one thread can do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		EXIT_TRACE : EXIT_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		return 0;
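	/*
	 * cmpxchg() returns the value it found, so among waiters racing
	 * on this transition exactly one observes EXIT_ZOMBIE and keeps
	 * going; every other caller sees the updated state and backs
	 * off through the return above.
	 */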
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * We own this thread; nobody else can reap it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	sched_annotate_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * Check thread_group_leader() to exclude the traced sub-threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if (state == EXIT_DEAD && thread_group_leader(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		struct signal_struct *sig = p->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		struct signal_struct *psig = current->signal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		unsigned long maxrss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		u64 tgutime, tgstime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		 * The resource counters for the group leader are in its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		 * own task_struct.  Those for dead threads in the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		 * are in its signal_struct, as are those for the child
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		 * processes it has previously reaped.  All these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		 * accumulate in the parent's signal_struct c* fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		 * We don't bother to take a lock here to protect these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		 * p->signal fields because the whole thread group is dead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		 * and nobody can change them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		 * psig->stats_lock also protects us from our sub-threads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		 * which can reap other children at the same time. Until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		 * we change k_getrusage()-like users to rely on this lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		 * we have to take ->siglock as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		 * We use thread_group_cputime_adjusted() to get times for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		 * the thread group, which consolidates times for all threads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		 * in the group including the group leader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		spin_lock_irq(&current->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		write_seqlock(&psig->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		psig->cutime += tgutime + sig->cutime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		psig->cstime += tgstime + sig->cstime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		psig->cmin_flt +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			p->min_flt + sig->min_flt + sig->cmin_flt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		psig->cmaj_flt +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		psig->cnvcsw +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			p->nvcsw + sig->nvcsw + sig->cnvcsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		psig->cnivcsw +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			p->nivcsw + sig->nivcsw + sig->cnivcsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		psig->cinblock +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			task_io_get_inblock(p) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			sig->inblock + sig->cinblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		psig->coublock +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			task_io_get_oublock(p) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			sig->oublock + sig->coublock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		maxrss = max(sig->maxrss, sig->cmaxrss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		if (psig->cmaxrss < maxrss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			psig->cmaxrss = maxrss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		task_io_accounting_add(&psig->ioac, &p->ioac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		task_io_accounting_add(&psig->ioac, &sig->ioac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		write_sequnlock(&psig->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		spin_unlock_irq(&current->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (wo->wo_rusage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		? p->signal->group_exit_code : p->exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	wo->wo_stat = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (state == EXIT_TRACE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		write_lock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		/* We dropped tasklist, ptracer could die and untrace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		ptrace_unlink(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		/* If parent wants a zombie, don't release it now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		state = EXIT_ZOMBIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		if (do_notify_parent(p, p->exit_signal))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			state = EXIT_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		p->exit_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (state == EXIT_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		release_task(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) out_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	infop = wo->wo_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	if (infop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		if ((status & 0x7f) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			infop->cause = CLD_EXITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			infop->status = status >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			infop->status = status & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		infop->pid = pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		infop->uid = uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	return pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static int *task_stopped_code(struct task_struct *p, bool ptrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	if (ptrace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			return &p->exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		if (p->signal->flags & SIGNAL_STOP_STOPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			return &p->signal->group_exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  * @wo: wait options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  * @ptrace: is the wait for ptrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  * @p: task to wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * read_lock(&tasklist_lock), which is released if return value is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * non-zero.  Also, grabs and releases @p->sighand->siglock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * 0 if the wait condition didn't exist and the search for other wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  * conditions should continue.  A non-zero return (-errno on failure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * @p's pid on success) implies that tasklist_lock has been released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * and the wait-condition search should terminate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int wait_task_stopped(struct wait_opts *wo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				int ptrace, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct waitid_info *infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	int exit_code, *p_code, why;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	uid_t uid = 0; /* logically unneeded; silences a compiler warning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	pid_t pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	 * Traditionally we see ptrace'd stopped tasks regardless of options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (!ptrace && !(wo->wo_flags & WUNTRACED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (!task_stopped_code(p, ptrace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	exit_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	spin_lock_irq(&p->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	p_code = task_stopped_code(p, ptrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (unlikely(!p_code))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		goto unlock_sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	exit_code = *p_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (!exit_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		goto unlock_sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	if (!unlikely(wo->wo_flags & WNOWAIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		*p_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	uid = from_kuid_munged(current_user_ns(), task_uid(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) unlock_sig:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	spin_unlock_irq(&p->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	if (!exit_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 * Now we are pretty sure this task is interesting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 * Make sure it doesn't get reaped out from under us while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 * give up the lock and then examine it below.  We don't want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	 * keep holding onto the tasklist_lock while we call getrusage and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	 * possibly take page faults for user memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	get_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	pid = task_pid_vnr(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	sched_annotate_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (wo->wo_rusage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	if (likely(!(wo->wo_flags & WNOWAIT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		wo->wo_stat = (exit_code << 8) | 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	infop = wo->wo_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	if (infop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		infop->cause = why;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		infop->status = exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		infop->pid = pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		infop->uid = uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	return pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
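
/*
 * The (exit_code << 8) | 0x7f packing above is the "stopped" wait
 * status: the 0x7f low byte is what WIFSTOPPED() keys on, and the stop
 * signal rides in bits 8-15.  For example, a child stopped by SIGSTOP
 * (19 on most architectures) reports 0x137f, so user space sees
 * WIFSTOPPED(status) true and WSTOPSIG(status) == SIGSTOP.
 */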
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * Handle do_wait work for one task in a live, non-stopped state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * the lock and this task is uninteresting.  If we return nonzero, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  * released the lock and the system call should return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct waitid_info *infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	pid_t pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	uid_t uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (!unlikely(wo->wo_flags & WCONTINUED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	spin_lock_irq(&p->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	/* Re-check with the lock held.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		spin_unlock_irq(&p->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (!unlikely(wo->wo_flags & WNOWAIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	uid = from_kuid_munged(current_user_ns(), task_uid(p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	spin_unlock_irq(&p->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	pid = task_pid_vnr(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	get_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	sched_annotate_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (wo->wo_rusage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	put_task_struct(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	infop = wo->wo_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	if (!infop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		wo->wo_stat = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		infop->cause = CLD_CONTINUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		infop->pid = pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		infop->uid = uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		infop->status = SIGCONT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	return pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
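
/*
 * The 0xffff written to wo_stat above is the dedicated "continued"
 * wait status; user space recognises it via WIFCONTINUED(status),
 * which simply tests status == 0xffff.
 */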
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  * Consider @p for a wait by @parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  * -ECHILD should be in ->notask_error before the first call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * Returns nonzero for a final return, when we have unlocked tasklist_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  * Returns zero if the search for a child should continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * then ->notask_error is 0 if @p is an eligible child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * or still -ECHILD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static int wait_consider_task(struct wait_opts *wo, int ptrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 				struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 * We can race with wait_task_zombie() from another thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	 * can't confuse the checks below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	int exit_state = READ_ONCE(p->exit_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (unlikely(exit_state == EXIT_DEAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	ret = eligible_child(wo, ptrace, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (unlikely(exit_state == EXIT_TRACE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		 * ptrace == 0 means we are the natural parent. In this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		 * we should clear notask_error; the debugger will notify us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		if (likely(!ptrace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			wo->notask_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (likely(!ptrace) && unlikely(p->ptrace)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		 * If it is traced by its real parent's group, just pretend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		 * the caller is ptrace_do_wait() and reap this child if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		 * is a zombie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		 * This also hides group stop state from real parent; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		 * a single stop can be reported twice as group and ptrace stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		 * If a ptracer wants to distinguish these two events for its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		 * own children it should create a separate process which takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		 * the role of real parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		if (!ptrace_reparented(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			ptrace = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	/* slay zombie? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (exit_state == EXIT_ZOMBIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		/* we don't reap group leaders with subthreads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		if (!delay_group_leader(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			 * A zombie ptracee is only visible to its ptracer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			 * Notification and reaping will be cascaded to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			 * real parent when the ptracer detaches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			if (unlikely(ptrace) || likely(!p->ptrace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				return wait_task_zombie(wo, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 * Allow access to stopped/continued state via zombie by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		 * falling through.  Clearing of notask_error is complex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		 * When !@ptrace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		 * If WEXITED is set, notask_error should naturally be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		 * so if there are live subthreads, there are events to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		 * wait for.  If all subthreads are dead, it's still safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		 * to clear - this function will be called again in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		 * finite amount of time once all the subthreads are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		 * released and will then return without clearing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		 * When @ptrace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		 * Stopped state is per-task and thus can't change once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		 * target task dies.  Only continued and exited can happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		 * Clear notask_error if WCONTINUED | WEXITED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			wo->notask_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		 * @p is alive and it's gonna stop, continue or exit, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		 * there always is something to wait for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		wo->notask_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 * Wait for stopped.  Depending on @ptrace, a different stopped state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 * is used and the two don't interact with each other.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	ret = wait_task_stopped(wo, ptrace, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 * Wait for continued.  There's only one continued state and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	 * ptracer can consume it, which can confuse the real parent.  Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	 * use WCONTINUED from ptracer.  You don't need or want it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	return wait_task_continued(wo, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  * Do the work of do_wait() for one thread in the group, @tsk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  * -ECHILD should be in ->notask_error before the first call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  * Returns nonzero for a final return, when we have unlocked tasklist_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * Returns zero if the search for a child should continue; then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  * ->notask_error is 0 if there were any eligible children,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * or still -ECHILD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	list_for_each_entry(p, &tsk->children, sibling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		int ret = wait_consider_task(wo, 0, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct task_struct *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		int ret = wait_consider_task(wo, 1, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				int sync, void *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	struct wait_opts *wo = container_of(wait, struct wait_opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 						child_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	struct task_struct *p = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	if (!eligible_pid(wo, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	return default_wake_function(wait, mode, sync, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	__wake_up_sync_key(&parent->signal->wait_chldexit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			   TASK_INTERRUPTIBLE, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static long do_wait(struct wait_opts *wo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	trace_sched_process_wait(wo->wo_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	wo->child_wait.private = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	 * If there is nothing that can match our criteria, just get out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	 * We will clear ->notask_error to zero if we see any child that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	 * might later match our criteria, even if we are not able to reap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	 * it yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	wo->notask_error = -ECHILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if ((wo->wo_type < PIDTYPE_MAX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		goto notask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	read_lock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		retval = do_wait_thread(wo, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		retval = ptrace_do_wait(wo, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		if (wo->wo_flags & __WNOTHREAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	} while_each_thread(current, tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) notask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	retval = wo->notask_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (!retval && !(wo->wo_flags & WNOHANG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		retval = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		if (!signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
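
/*
 * Reduced to its bones, the loop above is the canonical sleep/recheck
 * idiom (sketch only -- condition() stands in for do_wait()'s tasklist
 * scan and signal_pending() check):
 *
 *	add_wait_queue(&wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition())
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq, &wait);
 *
 * Setting the task state before testing the condition closes the
 * wake-up race: a child exiting between the test and schedule() puts
 * us back to TASK_RUNNING, so schedule() returns promptly instead of
 * sleeping forever.
 */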
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			  int options, struct rusage *ru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	struct wait_opts wo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	struct pid *pid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	enum pid_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	unsigned int f_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			__WNOTHREAD|__WCLONE|__WALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	case P_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		type = PIDTYPE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	case P_PID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		type = PIDTYPE_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		if (upid <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		pid = find_get_pid(upid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	case P_PGID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		type = PIDTYPE_PGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		if (upid < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		if (upid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			pid = find_get_pid(upid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			pid = get_task_pid(current, PIDTYPE_PGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	case P_PIDFD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		type = PIDTYPE_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		if (upid < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		pid = pidfd_get_pid(upid, &f_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		if (IS_ERR(pid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			return PTR_ERR(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	wo.wo_type	= type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	wo.wo_pid	= pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	wo.wo_flags	= options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	wo.wo_info	= infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	wo.wo_rusage	= ru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (f_flags & O_NONBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		wo.wo_flags |= WNOHANG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	ret = do_wait(&wo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	put_pid(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
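
/*
 * For reference, the user-space view of this path (standard waitid(2));
 * a sketch that reaps one specific child:
 *
 *	siginfo_t si;
 *	if (waitid(P_PID, pid, &si, WEXITED) == 0) {
 *		si.si_code is CLD_EXITED, CLD_KILLED or CLD_DUMPED;
 *		si.si_status carries the exit code or the fatal signal;
 *	}
 *
 * Unlike wait4(), which gets WEXITED added for it in kernel_wait4()
 * below, waitid() callers must pass at least one of WEXITED, WSTOPPED
 * or WCONTINUED explicitly, or kernel_waitid() returns -EINVAL.
 */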
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		infop, int, options, struct rusage __user *, ru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	struct rusage r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	struct waitid_info info = {.status = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	int signo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		signo = SIGCHLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (!infop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	if (!user_write_access_begin(infop, sizeof(*infop)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	unsafe_put_user(signo, &infop->si_signo, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	unsafe_put_user(0, &infop->si_errno, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	unsafe_put_user(info.cause, &infop->si_code, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	unsafe_put_user(info.status, &infop->si_status, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	user_write_access_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) Efault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	user_write_access_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
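/*
 * A consequence of the unconditional infop fill above, matching
 * waitid(2) (sketch of the userspace contract): under WNOHANG, "no
 * child has changed state yet" is still a successful return, only with
 * a zeroed siginfo, so callers test si_pid instead of the return value:
 *
 *	siginfo_t info;
 *
 *	if (waitid(P_ALL, 0, &info, WEXITED | WNOHANG) == 0) {
 *		if (info.si_pid == 0)
 *			;			// nothing exited yet
 *		else
 *			reap(info.si_pid);	// reap() is a hypothetical handler
 *	}
 */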
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		  struct rusage *ru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	struct wait_opts wo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct pid *pid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	enum pid_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			__WNOTHREAD|__WCLONE|__WALL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	/* negating INT_MIN is undefined, so reject it before -upid below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	if (upid == INT_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	if (upid == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		type = PIDTYPE_MAX;	/* any child */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	else if (upid < 0) {	/* any child in process group -upid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		type = PIDTYPE_PGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		pid = find_get_pid(-upid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	} else if (upid == 0) {	/* any child in the caller's process group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		type = PIDTYPE_PGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		pid = get_task_pid(current, PIDTYPE_PGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	} else /* upid > 0 */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		type = PIDTYPE_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		pid = find_get_pid(upid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	wo.wo_type	= type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	wo.wo_pid	= pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	wo.wo_flags	= options | WEXITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	wo.wo_info	= NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	wo.wo_stat	= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	wo.wo_rusage	= ru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	ret = do_wait(&wo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	put_pid(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
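/*
 * The four upid forms decoded above are the classic wait4(2)/waitpid(2)
 * encoding; seen from userspace (sketch):
 *
 *	wait4(-pgid, &status, 0, NULL);	// any child in process group pgid
 *	wait4(-1, &status, 0, NULL);	// any child at all
 *	wait4(0, &status, 0, NULL);	// any child in our own process group
 *	wait4(pid, &status, 0, NULL);	// exactly the child with this PID
 */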
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) int kernel_wait(pid_t pid, int *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	struct wait_opts wo = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		.wo_type	= PIDTYPE_PID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		.wo_pid		= find_get_pid(pid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		.wo_flags	= WEXITED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	ret = do_wait(&wo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (ret > 0 && wo.wo_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		*stat = wo.wo_stat;	/* a zero status word leaves *stat untouched */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	put_pid(wo.wo_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
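/*
 * kernel_wait() is the minimal in-kernel variant: WEXITED only, a
 * kernel pointer for the status word, no rusage or siginfo plumbing.
 * Sketch of a kernel-side caller (worker_fn is illustrative; see the
 * usermode-helper code in kernel/umh.c for a real user):
 *
 *	pid_t pid = kernel_thread(worker_fn, NULL, SIGCHLD);
 *	int status = 0;
 *
 *	if (pid >= 0 && kernel_wait(pid, &status) > 0)
 *		pr_info("worker exited, raw wait status %#x\n", status);
 */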
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		int, options, struct rusage __user *, ru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	struct rusage r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) #ifdef __ARCH_WANT_SYS_WAITPID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  * sys_waitpid() remains for compatibility. waitpid() should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  * implemented by calling sys_wait4() from libc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	return kernel_wait4(pid, stat_addr, options, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
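/*
 * In C library terms the equivalence is simply (sketch):
 *
 *	pid_t waitpid(pid_t pid, int *status, int options)
 *	{
 *		return wait4(pid, status, options, NULL);
 *	}
 */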
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) COMPAT_SYSCALL_DEFINE4(wait4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	compat_pid_t, pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	compat_uint_t __user *, stat_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	int, options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	struct compat_rusage __user *, ru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct rusage r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		if (ru && put_compat_rusage(&r, ru))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) COMPAT_SYSCALL_DEFINE5(waitid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		int, which, compat_pid_t, pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		struct compat_siginfo __user *, infop, int, options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		struct compat_rusage __user *, uru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	struct rusage ru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	struct waitid_info info = {.status = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	int signo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		signo = SIGCHLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		if (uru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			/* kernel_waitid() overwrites everything in ru */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			if (COMPAT_USE_64BIT_TIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 				err = copy_to_user(uru, &ru, sizeof(ru));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				err = put_compat_rusage(&ru, uru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	if (!infop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	if (!user_write_access_begin(infop, sizeof(*infop)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	unsafe_put_user(signo, &infop->si_signo, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	unsafe_put_user(0, &infop->si_errno, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	unsafe_put_user(info.cause, &infop->si_code, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	unsafe_put_user(info.status, &infop->si_status, Efault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	user_write_access_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) Efault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	user_write_access_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) #endif
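/*
 * Note that the compat entry points above share all of the wait logic
 * with the native ones and differ only in marshalling: for rusage,
 * put_compat_rusage() translates the native struct rusage into the
 * 32-bit struct compat_rusage layout field by field, while
 * COMPAT_USE_64BIT_TIME tasks, whose layout already matches, get a
 * plain copy_to_user().
 */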
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  * thread_group_exited - check that a thread group has exited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  * @pid: tgid of thread group to be checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  * Test if the thread group represented by tgid has exited (all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  * threads are zombies, dead or completely gone).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)  * Return: true if the thread group has exited. false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) bool thread_group_exited(struct pid *pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	bool exited;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	task = pid_task(pid, PIDTYPE_PID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	exited = !task ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		(READ_ONCE(task->exit_state) && thread_group_empty(task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	return exited;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) EXPORT_SYMBOL(thread_group_exited);
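/*
 * Usage sketch, modelled on the pidfd poll path in kernel/fork.c
 * (simplified, not verbatim): thread_group_exited() answers "is there
 * anything left to wait for?" under nothing heavier than RCU:
 *
 *	static __poll_t pidfd_poll_sketch(struct file *file,
 *					  struct poll_table_struct *pts)
 *	{
 *		struct pid *pid = file->private_data;
 *
 *		poll_wait(file, &pid->wait_pidfd, pts);
 *		return thread_group_exited(pid) ? EPOLLIN | EPOLLRDNORM : 0;
 *	}
 */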
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) __weak void abort(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	/* if that doesn't kill us, halt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	panic("Oops failed to kill thread");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) EXPORT_SYMBOL(abort);
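/*
 * Because abort() is __weak, any stronger definition elsewhere in the
 * kernel overrides it and this fallback drops out at link time; the
 * compiler itself may emit calls to abort(), which is why a definition
 * must always exist.  Shape of a hypothetical override (sketch):
 *
 *	void abort(void)
 *	{
 *		machine_halt();		// hypothetical arch-specific path, never returns
 *	}
 */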