Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * License.  See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * Cross Partition Communication (XPC) support - standard version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *	XPC provides a message passing capability that crosses partition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *	boundaries. This module is made up of two parts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *	    partition	This part detects the presence/absence of other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *			partitions. It provides a heartbeat and monitors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *			the heartbeats of other partitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *	    channel	This part manages the channels and sends/receives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  *			messages across them to/from other partitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  *	There are a couple of additional functions residing in XP, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  *	provide an interface to XPC for its users.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  *	Caveats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *	  . Currently on sn2, we have no way to determine which nasid an IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *	    followed by an IPI. The amo indicates where data is to be pulled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  *	    from, so after the IPI arrives, the remote partition checks the amo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  *	    word. The IPI can actually arrive before the amo however, so other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  *	    code must periodically check for this case. Also, remote amo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  *	    operations do not reliably time out. Thus we do a remote PIO read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  *	    solely to know whether the remote partition is down and whether we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  *	    should stop sending IPIs to it. This remote PIO read operation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  *	    set up in a special nofault region so SAL knows to ignore (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  *	    cleanup) any errors due to the remote amo write, PIO read, and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  *	    PIO write operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  *	    If/when new hardware solves this IPI problem, we should abandon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  *	    the current approach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/kdebug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include "xpc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) /* define two XPC debug device structures to be used with dev_dbg() et al */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
/* shared driver name for both XPC debug devices */
static struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

/* pseudo-device for partition-related debug output */
static struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

/* pseudo-device for channel-related debug output */
static struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

/* handles passed to dev_dbg()/dev_err() throughout XPC */
struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
/* when non-zero, the die notifier ignores kdebug events */
static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

/* how often we emit our own heartbeat; in seconds (scaled by HZ at use) */
static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

/* how often we check remote heartbeats; in seconds (scaled by HZ at use) */
static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

/* limit on how long we wait for a remote partition to disengage */
int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
/* sysctl leaf entries under the "hb" directory (heartbeat tunables) */
static struct ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
/* "xpc" directory: the "hb" subtree plus the disengage timelimit */
static struct ctl_table xpc_sys_xpc_dir[] = {
	{
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
/* root of the XPC sysctl tree (presumably /proc/sys/xpc — registered
 * elsewhere; confirm against the register_sysctl_table() call site) */
static struct ctl_table xpc_sys_dir[] = {
	{
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
/* handle returned at registration time; needed to unregister on exit */
static struct ctl_table_header *xpc_sysctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 
/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
/* protects xpc_activate_IRQ_rcvd */
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

/* jiffies value at which the next remote-heartbeat check is due */
static unsigned long xpc_hb_check_timeout;
/* timer that drives our own periodic heartbeat (see xpc_hb_beater()) */
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

/* hook into the reboot path so XPC can deactivate partitions cleanly */
static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

/* hook into the die/panic path */
static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

/* architecture-specific operations, filled in at init time */
struct xpc_arch_operations xpc_arch_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * Timer function to enforce the timelimit on the partition disengage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  */
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
	/* recover the owning partition from its embedded disengage_timer */
	struct xpc_partition *part = from_timer(part, t, disengage_timer);

	/* this timer should not fire before the deadline has passed */
	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	/* xpc_partition_disengaged() is expected to have cleared the
	 * timeout and forced the disengage — verify both */
	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  * Timer to produce the heartbeat.  The timer structures function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)  * already set when this is initially called.  A tunable is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191)  * specify when the next timeout should occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192)  */
static void
xpc_hb_beater(struct timer_list *unused)
{
	/* emit our own heartbeat so other partitions can see we're alive */
	xpc_arch_ops.increment_heartbeat();

	/* if a remote-heartbeat check is due, poke the hb_checker thread */
	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* re-arm ourselves; xpc_hb_interval is a sysctl tunable (seconds) */
	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) xpc_start_hb_beater(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	xpc_arch_ops.heartbeat_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	xpc_hb_beater(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
/*
 * Stop producing our heartbeat.  The timer is torn down first so no
 * further beats can fire once the arch state is released.
 */
static void
xpc_stop_hb_beater(void)
{
	/* waits for a concurrently-running xpc_hb_beater() to finish */
	del_timer_sync(&xpc_hb_timer);
	xpc_arch_ops.heartbeat_exit();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  * At periodic intervals, scan through all active partitions and ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  * their heartbeat is still active.  If not, the partition is deactivated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) xpc_check_remote_hb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	struct xpc_partition *part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	short partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	enum xp_retval ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	for (partid = 0; partid < xp_max_npartitions; partid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		if (xpc_exiting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 		if (partid == xp_partition_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 		part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 		if (part->act_state == XPC_P_AS_INACTIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		    part->act_state == XPC_P_AS_DEACTIVATING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		ret = xpc_arch_ops.get_remote_heartbeat(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 		if (ret != xpSuccess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 			XPC_DEACTIVATE_PARTITION(part, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253)  * This thread is responsible for nearly all of the partition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254)  * activation/deactivation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255)  */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	/* pin ourselves to the designated heartbeat-check CPU */
	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			/* re-arm the deadline before the (possibly slow)
			 * scan so the next period isn't delayed by it */
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_arch_ops.process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311)  * This thread will attempt to discover other partitions to activate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312)  * based on info provided by SAL. This new thread is short lived and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313)  * will exit once discovery is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314)  */
static int
xpc_initiate_discovery(void *ignore)
{
	/* probe for other partitions; returns once discovery completes */
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328)  * The first kthread assigned to a newly activated partition is the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329)  * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330)  * that kthread until the partition is brought down, at which time that kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331)  * returns back to XPC HB. (The return of that kthread will signify to XPC HB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332)  * that XPC has dismantled all communication infrastructure for the associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  * partition.) This kthread becomes the channel manager for that partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335)  * Each active partition has a channel manager, who, besides connecting and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336)  * disconnecting channels, will ensure that each of the partition's connected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337)  * channels has the required number of assigned kthreads to get the work done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338)  */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	/* keep managing until the partition is deactivating, has no active
	 * channels left, and is fully disengaged */
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened,
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since he will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake him up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		/* the wake condition mirrors the loop-exit condition above,
		 * plus pending requests and unprocessed chctl flags */
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373)  * Guarantee that the kzalloc'd memory is cacheline aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	/* see if kzalloc will give us cachline aligned memory by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	*base = kzalloc(size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	if (*base == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 		return *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	kfree(*base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	/* nope, we'll have to do it ourselves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	*base = kzalloc(size + L1_CACHE_BYTES, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	if (*base == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	return (void *)L1_CACHE_ALIGN((u64)*base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397)  * Setup the channel structures necessary to support XPartition Communication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398)  * between the specified remote partition and the local one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) static enum xp_retval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) xpc_setup_ch_structures(struct xpc_partition *part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	enum xp_retval ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	int ch_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	struct xpc_channel *ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	short partid = XPC_PARTID(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	 * Allocate all of the channel structures as a contiguous chunk of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	 * memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	DBUG_ON(part->channels != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	part->channels = kcalloc(XPC_MAX_NCHANNELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 				 sizeof(struct xpc_channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	if (part->channels == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		dev_err(xpc_chan, "can't get memory for channels\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		return xpNoMemory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	/* allocate the remote open and close args */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	part->remote_openclose_args =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 					  GFP_KERNEL, &part->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 					  remote_openclose_args_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	if (part->remote_openclose_args == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		dev_err(xpc_chan, "can't get memory for remote connect args\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		ret = xpNoMemory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		goto out_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	part->chctl.all_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	spin_lock_init(&part->chctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	atomic_set(&part->channel_mgr_requests, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	init_waitqueue_head(&part->channel_mgr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	part->nchannels = XPC_MAX_NCHANNELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	atomic_set(&part->nchannels_active, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	atomic_set(&part->nchannels_engaged, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		ch = &part->channels[ch_number];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		ch->partid = partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		ch->number = ch_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		ch->flags = XPC_C_DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		atomic_set(&ch->kthreads_assigned, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		atomic_set(&ch->kthreads_idle, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		atomic_set(&ch->kthreads_active, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		atomic_set(&ch->references, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		atomic_set(&ch->n_to_notify, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		spin_lock_init(&ch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		init_completion(&ch->wdisconnect_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		atomic_set(&ch->n_on_msg_allocate_wq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		init_waitqueue_head(&ch->msg_allocate_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		init_waitqueue_head(&ch->idle_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	ret = xpc_arch_ops.setup_ch_structures(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	if (ret != xpSuccess)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		goto out_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	 * we're declaring that this partition is ready to go.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	part->setup_state = XPC_P_SS_SETUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	return xpSuccess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	/* setup of ch structures failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) out_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	kfree(part->remote_openclose_args_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	part->remote_openclose_args = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) out_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	kfree(part->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	part->channels = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489)  * Teardown the channel structures necessary to support XPartition Communication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490)  * between the specified remote partition and the local one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) xpc_teardown_ch_structures(struct xpc_partition *part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	 * Make this partition inaccessible to local processes by marking it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	 * as no longer setup. Then wait before proceeding with the teardown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	 * until all existing references cease.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	part->setup_state = XPC_P_SS_WTEARDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	/* now we can begin tearing down the infrastructure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	xpc_arch_ops.teardown_ch_structures(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	kfree(part->remote_openclose_args_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	part->remote_openclose_args = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	kfree(part->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	part->channels = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	part->setup_state = XPC_P_SS_TORNDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521)  * When XPC HB determines that a partition has come up, it will create a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522)  * kthread and that kthread will call this function to attempt to set up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523)  * basic infrastructure used for Cross Partition Communication with the newly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524)  * upped partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526)  * The kthread that was created by XPC HB and which setup the XPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527)  * infrastructure will remain assigned to the partition becoming the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528)  * manager for that partition until the partition is deactivating, at which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  * time the kthread will teardown the XPC infrastructure and then exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) xpc_activating(void *__partid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	short partid = (u64)__partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	struct xpc_partition *part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	spin_lock_irqsave(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	if (part->act_state == XPC_P_AS_DEACTIVATING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		part->act_state = XPC_P_AS_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		part->remote_rp_pa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	/* indicate the thread is activating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	part->act_state = XPC_P_AS_ACTIVATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	XPC_SET_REASON(part, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	dev_dbg(xpc_part, "activating partition %d\n", partid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	xpc_arch_ops.allow_hb(partid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	if (xpc_setup_ch_structures(part) == xpSuccess) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		(void)xpc_part_ref(part);	/* this will always succeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			xpc_mark_partition_active(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			xpc_channel_mgr(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 			/* won't return until partition is deactivating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		xpc_part_deref(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		xpc_teardown_ch_structures(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	xpc_arch_ops.disallow_hb(partid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	xpc_mark_partition_inactive(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	if (part->reason == xpReactivating) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		/* interrupting ourselves results in activating partition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		xpc_arch_ops.request_partition_reactivation(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) xpc_activate_partition(struct xpc_partition *part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	short partid = XPC_PARTID(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	struct task_struct *kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	spin_lock_irqsave(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	part->act_state = XPC_P_AS_ACTIVATION_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	XPC_SET_REASON(part, xpCloneKThread, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 			      partid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	if (IS_ERR(kthread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		spin_lock_irqsave(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		part->act_state = XPC_P_AS_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) xpc_activate_kthreads(struct xpc_channel *ch, int needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	int idle = atomic_read(&ch->kthreads_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	int assigned = atomic_read(&ch->kthreads_assigned);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	int wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	DBUG_ON(needed <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (idle > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		wakeup = (needed > idle) ? idle : needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		needed -= wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			"channel=%d\n", wakeup, ch->partid, ch->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		/* only wakeup the requested number of kthreads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		wake_up_nr(&ch->idle_wq, wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	if (needed <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	if (needed + assigned > ch->kthreads_assigned_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		needed = ch->kthreads_assigned_limit - assigned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		if (needed <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		needed, ch->partid, ch->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	xpc_create_kthreads(ch, needed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  * This function is where XPC's kthreads wait for messages to deliver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		xpc_arch_ops.n_of_deliverable_payloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		/* deliver messages to their intended recipients */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		while (n_of_deliverable_payloads(ch) > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		       !(ch->flags & XPC_C_DISCONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			xpc_deliver_payload(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		if (atomic_inc_return(&ch->kthreads_idle) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		    ch->kthreads_idle_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			/* too many idle kthreads on this channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			atomic_dec(&ch->kthreads_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		dev_dbg(xpc_chan, "idle kthread calling "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			"wait_event_interruptible_exclusive()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		(void)wait_event_interruptible_exclusive(ch->idle_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 				(n_of_deliverable_payloads(ch) > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 				 (ch->flags & XPC_C_DISCONNECTING)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		atomic_dec(&ch->kthreads_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	} while (!(ch->flags & XPC_C_DISCONNECTING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) xpc_kthread_start(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	short partid = XPC_UNPACK_ARG1(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	u16 ch_number = XPC_UNPACK_ARG2(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	struct xpc_partition *part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	struct xpc_channel *ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	int n_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		xpc_arch_ops.n_of_deliverable_payloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		partid, ch_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	ch = &part->channels[ch_number];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	if (!(ch->flags & XPC_C_DISCONNECTING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		/* let registerer know that connection has been established */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		spin_lock_irqsave(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			ch->flags |= XPC_C_CONNECTEDCALLOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			xpc_connected_callout(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			spin_lock_irqsave(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			 * It is possible that while the callout was being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			 * made that the remote partition sent some messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 			 * If that is the case, we may need to activate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			 * additional kthreads to help deliver them. We only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			 * need one less than total #of messages to deliver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			n_needed = n_of_deliverable_payloads(ch) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 				xpc_activate_kthreads(ch, n_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		xpc_kthread_waitmsgs(part, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/* let registerer know that connection is disconnecting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	spin_lock_irqsave(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		xpc_disconnect_callout(ch, xpDisconnecting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		spin_lock_irqsave(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	    atomic_dec_return(&part->nchannels_engaged) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		xpc_arch_ops.indicate_partition_disengaged(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	xpc_msgqueue_deref(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		partid, ch_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	xpc_part_deref(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * For each partition that XPC has established communications with, there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  * a minimum of one kernel thread assigned to perform any operation that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * may potentially sleep or block (basically the callouts to the asynchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * functions registered via xpc_connect()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  * Additional kthreads are created and destroyed by XPC as the workload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767)  * demands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769)  * A kthread is assigned to one of the active channels that exists for a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  * partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) xpc_create_kthreads(struct xpc_channel *ch, int needed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		    int ignore_disconnecting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	struct xpc_partition *part = &xpc_partitions[ch->partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct task_struct *kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	void (*indicate_partition_disengaged) (struct xpc_partition *) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		xpc_arch_ops.indicate_partition_disengaged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	while (needed-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		 * The following is done on behalf of the newly created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		 * kthread. That kthread is responsible for doing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		 * counterpart to the following before it exits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		if (ignore_disconnecting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				/* kthreads assigned had gone to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 				BUG_ON(!(ch->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 					 XPC_C_DISCONNECTINGCALLOUT_MADE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		} else if (ch->flags & XPC_C_DISCONNECTING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			   atomic_inc_return(&part->nchannels_engaged) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			xpc_arch_ops.indicate_partition_engaged(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		(void)xpc_part_ref(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		xpc_msgqueue_ref(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		kthread = kthread_run(xpc_kthread_start, (void *)args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 				      "xpc%02dc%d", ch->partid, ch->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (IS_ERR(kthread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			/* the fork failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			 * NOTE: if (ignore_disconnecting &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			 * then we'll deadlock if all other kthreads assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			 * to this channel are blocked in the channel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			 * registerer, because the only thing that will unblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			 * them is the xpDisconnecting callout that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			 * failed kthread_run() would have made.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 				indicate_partition_disengaged(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			xpc_msgqueue_deref(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			xpc_part_deref(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			if (atomic_read(&ch->kthreads_assigned) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			    ch->kthreads_idle_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 				 * Flag this as an error only if we have an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				 * insufficient #of kthreads for the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				 * to function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 				spin_lock_irqsave(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 						       &irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) xpc_disconnect_wait(int ch_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	short partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	struct xpc_partition *part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	struct xpc_channel *ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	int wakeup_channel_mgr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/* now wait for all callouts to the caller's function to cease */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	for (partid = 0; partid < xp_max_npartitions; partid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		if (!xpc_part_ref(part))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		ch = &part->channels[ch_number];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		if (!(ch->flags & XPC_C_WDISCONNECT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			xpc_part_deref(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		wait_for_completion(&ch->wdisconnect_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		spin_lock_irqsave(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		wakeup_channel_mgr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		if (ch->delayed_chctl_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			if (part->act_state != XPC_P_AS_DEACTIVATING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 				spin_lock(&part->chctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				part->chctl.flags[ch->number] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				    ch->delayed_chctl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				spin_unlock(&part->chctl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 				wakeup_channel_mgr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			ch->delayed_chctl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		ch->flags &= ~XPC_C_WDISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		spin_unlock_irqrestore(&ch->lock, irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		if (wakeup_channel_mgr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			xpc_wakeup_channel_mgr(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		xpc_part_deref(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) xpc_setup_partitions(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	short partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	struct xpc_partition *part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	xpc_partitions = kcalloc(xp_max_npartitions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 				 sizeof(struct xpc_partition),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (xpc_partitions == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		dev_err(xpc_part, "can't get memory for partition structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 * The first few fields of each entry of xpc_partitions[] need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 * be initialized now so that calls to xpc_connect() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	 * xpc_disconnect() can be made prior to the activation of any remote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	 * PARTITION HAS BEEN ACTIVATED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	for (partid = 0; partid < xp_max_npartitions; partid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		part->activate_IRQ_rcvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		spin_lock_init(&part->act_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		part->act_state = XPC_P_AS_INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		XPC_SET_REASON(part, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		timer_setup(&part->disengage_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			    xpc_timeout_partition_disengage, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		part->setup_state = XPC_P_SS_UNSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		init_waitqueue_head(&part->teardown_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		atomic_set(&part->references, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	return xpc_arch_ops.setup_partitions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) xpc_teardown_partitions(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	xpc_arch_ops.teardown_partitions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	kfree(xpc_partitions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) xpc_do_exit(enum xp_retval reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	short partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	int active_part_count, printed_waiting_msg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	struct xpc_partition *part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	unsigned long printmsg_time, disengage_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	DBUG_ON(xpc_exiting == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * Let the heartbeat checker thread and the discovery thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 * (if one is running) know that they should exit. Also wake up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 * the heartbeat checker thread in case it's sleeping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	xpc_exiting = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	wake_up_interruptible(&xpc_activate_IRQ_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	/* wait for the discovery thread to exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	wait_for_completion(&xpc_discovery_exited);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	/* wait for the heartbeat checker thread to exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	wait_for_completion(&xpc_hb_checker_exited);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/* sleep for a 1/3 of a second or so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	(void)msleep_interruptible(300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* wait for all partitions to become inactive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	xpc_disengage_timedout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		active_part_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		for (partid = 0; partid < xp_max_npartitions; partid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			if (xpc_partition_disengaged(part) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			    part->act_state == XPC_P_AS_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			active_part_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			XPC_DEACTIVATE_PARTITION(part, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			if (part->disengage_timeout > disengage_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 				disengage_timeout = part->disengage_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		if (xpc_arch_ops.any_partition_engaged()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			if (time_is_before_jiffies(printmsg_time)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				dev_info(xpc_part, "waiting for remote "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 					 "partitions to deactivate, timeout in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 					 "%ld seconds\n", (disengage_timeout -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 					 jiffies) / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				printmsg_time = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 				printed_waiting_msg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		} else if (active_part_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			if (printed_waiting_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 				dev_info(xpc_part, "waiting for local partition"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 					 " to deactivate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				printed_waiting_msg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			if (!xpc_disengage_timedout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				dev_info(xpc_part, "all partitions have "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 					 "deactivated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		/* sleep for a 1/3 of a second or so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		(void)msleep_interruptible(300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	DBUG_ON(xpc_arch_ops.any_partition_engaged());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	xpc_teardown_rsvd_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (reason == xpUnloading) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		(void)unregister_die_notifier(&xpc_die_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* clear the interface to XPC's functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	xpc_clear_interface();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	if (xpc_sysctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		unregister_sysctl_table(xpc_sysctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	xpc_teardown_partitions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (is_uv_system())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		xpc_exit_uv();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  * This function is called when the system is being rebooted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	enum xp_retval reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	case SYS_RESTART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		reason = xpSystemReboot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	case SYS_HALT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		reason = xpSystemHalt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	case SYS_POWER_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		reason = xpSystemPoweroff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		reason = xpSystemGoingDown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	xpc_do_exit(reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /* used to allow only one CPU to complete the die-time disconnect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static unsigned int xpc_die_disconnecting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  * Notify other partitions to deactivate from us by first disengaging from all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * references to our memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) xpc_die_deactivate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	struct xpc_partition *part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	short partid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	int any_engaged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	long keep_waiting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	long wait_to_print;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	/* keep xpc_hb_checker thread from doing anything (just in case) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	xpc_exiting = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	xpc_arch_ops.disallow_all_hbs();   /*indicate we're deactivated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	for (partid = 0; partid < xp_max_npartitions; partid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		part = &xpc_partitions[partid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		if (xpc_arch_ops.partition_engaged(partid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		    part->act_state != XPC_P_AS_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			xpc_arch_ops.request_partition_deactivation(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			xpc_arch_ops.indicate_partition_disengaged(part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 * Though we requested that all other partitions deactivate from us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	 * we only wait until they've all disengaged or we've reached the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	 * defined timelimit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	 * Given that one iteration through the following while-loop takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	 * approximately 200 microseconds, calculate the #of loops to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	 * before bailing and the #of loops before printing a waiting message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		any_engaged = xpc_arch_ops.any_partition_engaged();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		if (!any_engaged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			dev_info(xpc_part, "all partitions have deactivated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		if (!keep_waiting--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			for (partid = 0; partid < xp_max_npartitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			     partid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				if (xpc_arch_ops.partition_engaged(partid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 					dev_info(xpc_part, "deactivate from "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 						 "remote partition %d timed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 						 "out\n", partid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		if (!wait_to_print--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			dev_info(xpc_part, "waiting for remote partitions to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 				 "deactivate, timeout in %ld seconds\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				 keep_waiting / (1000 * 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			    1000 * 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		udelay(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)  * This function is called when the system is being restarted or halted due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)  * to some sort of system failure. If this is the case we need to notify the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  * other partitions to disengage from all references to our memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  * This function can also be called when our heartbeater could be offlined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)  * for a time. In this case we need to notify other partitions to not worry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  * about the lack of a heartbeat.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) #ifdef CONFIG_IA64		/* !!! temporary kludge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	case DIE_MACHINE_RESTART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	case DIE_MACHINE_HALT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		xpc_die_deactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	case DIE_KDEBUG_ENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		/* Should lack of heartbeat be ignored by other partitions? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		if (!xpc_kdebug_ignore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	case DIE_MCA_MONARCH_ENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	case DIE_INIT_MONARCH_ENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		xpc_arch_ops.offline_heartbeat();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	case DIE_KDEBUG_LEAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		/* Is lack of heartbeat being ignored by other partitions? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		if (!xpc_kdebug_ignore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	case DIE_MCA_MONARCH_LEAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	case DIE_INIT_MONARCH_LEAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		xpc_arch_ops.online_heartbeat();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	struct die_args *die_args = _die_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	case DIE_TRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (die_args->trapnr == X86_TRAP_DF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			xpc_die_deactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		if (((die_args->trapnr == X86_TRAP_MF) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		     (die_args->trapnr == X86_TRAP_XF)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		    !user_mode(die_args->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			xpc_die_deactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	case DIE_INT3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	case DIE_DEBUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	case DIE_OOPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	case DIE_GPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		xpc_die_deactivate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) xpc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	struct task_struct *kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	dev_set_name(xpc_part, "part");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	dev_set_name(xpc_chan, "chan");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	if (is_uv_system()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		ret = xpc_init_uv();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	ret = xpc_setup_partitions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		dev_err(xpc_part, "can't get memory for partition structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		goto out_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	 * Fill the partition reserved page with the information needed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	 * other partitions to discover we are alive and establish initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	 * communications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	ret = xpc_setup_rsvd_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		dev_err(xpc_part, "can't setup our reserved page\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		goto out_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	/* add ourselves to the reboot_notifier_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	ret = register_reboot_notifier(&xpc_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		dev_warn(xpc_part, "can't register reboot notifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	/* add ourselves to the die_notifier list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	ret = register_die_notifier(&xpc_die_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		dev_warn(xpc_part, "can't register die notifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 * The real work-horse behind xpc.  This processes incoming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * interrupts and monitors remote heartbeats.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	if (IS_ERR(kthread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		dev_err(xpc_part, "failed while forking hb check thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		goto out_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 * Startup a thread that will attempt to discover other partitions to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 * activate based on info provided by SAL. This new thread is short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	 * lived and will exit once discovery is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	kthread = kthread_run(xpc_initiate_discovery, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			      XPC_DISCOVERY_THREAD_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	if (IS_ERR(kthread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		dev_err(xpc_part, "failed while forking discovery thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		/* mark this new thread as a non-starter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		complete(&xpc_discovery_exited);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		xpc_do_exit(xpUnloading);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	/* set the interface to point at XPC's functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			  xpc_initiate_send, xpc_initiate_send_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			  xpc_initiate_received, xpc_initiate_partid_to_nasids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	/* initialization was not successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) out_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	xpc_teardown_rsvd_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	(void)unregister_die_notifier(&xpc_die_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) out_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (xpc_sysctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		unregister_sysctl_table(xpc_sysctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	xpc_teardown_partitions();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) out_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (is_uv_system())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		xpc_exit_uv();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) module_init(xpc_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) xpc_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	xpc_do_exit(xpUnloading);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) module_exit(xpc_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) MODULE_AUTHOR("Silicon Graphics, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) module_param(xpc_hb_interval, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		 "heartbeat increments.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) module_param(xpc_hb_check_interval, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		 "heartbeat checks.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) module_param(xpc_disengage_timelimit, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		 "for disengage to complete.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) module_param(xpc_kdebug_ignore, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		 "other partitions when dropping into kdebug.");