Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) partition support.
 *
 *	This is the part of XPC that detects the presence/absence of
 *	other partitions. It provides a heartbeat and monitors the
 *	heartbeats of other partitions.
 *
 */

#include <linux/device.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include "xpc.h"
#include <asm/uv/uv_hub.h>

/* XPC is exiting flag */
int xpc_exiting;

/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static unsigned long *xpc_part_nasids;
unsigned long *xpc_mach_nasids;

static int xpc_nasid_mask_nbytes;	/* #of bytes in nasid mask */
int xpc_nasid_mask_nlongs;	/* #of longs in nasid mask */

struct xpc_partition *xpc_partitions;

/*
 * Guarantee that the kmalloc'd memory is cacheline aligned.
 */
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc will give us cacheline aligned memory by default */
	*base = kmalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
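
/*
 * Usage sketch (illustrative only; the caller below is hypothetical): the
 * aligned pointer is what the caller works with, while *base is what must
 * eventually be passed to kfree(), since the aligned pointer may land in
 * the middle of the kmalloc'd block:
 *
 *	void *base, *buf;
 *
 *	buf = xpc_kmalloc_cacheline_aligned(nbytes, GFP_KERNEL, &base);
 *	if (buf == NULL)
 *		return;
 *	...			(use the cacheline-aligned buffer)
 *	kfree(base);		(free the base pointer, never buf)
 */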

/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static unsigned long
xpc_get_rsvd_page_pa(int nasid)
{
	enum xp_retval ret;
	u64 cookie = 0;
	unsigned long rp_pa = nasid;	/* seed with nasid */
	size_t len = 0;
	size_t buf_len = 0;
	void *buf = NULL;
	void *buf_base = NULL;
	enum xp_retval (*get_partition_rsvd_page_pa)
		(void *, u64 *, unsigned long *, size_t *) =
		xpc_arch_ops.get_partition_rsvd_page_pa;

	while (1) {

		/* !!! rp_pa will need to be _gpa on UV.
		 * ??? So do we save it into the architecture specific parts
		 * ??? of the xpc_partition structure? Do we rename this
		 * ??? function or have two versions? Rename rp_pa for UV to
		 * ??? rp_gpa?
		 */
		ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);

		dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
			"address=0x%016lx, len=0x%016lx\n", ret,
			(unsigned long)cookie, rp_pa, len);

		if (ret != xpNeedMoreInfo)
			break;

		if (len > buf_len) {
			kfree(buf_base);
			buf_len = L1_CACHE_ALIGN(len);
			buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
							    &buf_base);
			if (buf_base == NULL) {
				dev_err(xpc_part, "unable to kmalloc "
					"len=0x%016lx\n", buf_len);
				ret = xpNoMemory;
				break;
			}
		}

		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
		if (ret != xpSuccess) {
			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
			break;
		}
	}

	kfree(buf_base);

	if (ret != xpSuccess)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
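
/*
 * Caller's-eye sketch (illustrative): a return of 0 means "no reserved page
 * for that nasid", so callers in this file test for 0 rather than an errno:
 *
 *	rp_pa = xpc_get_rsvd_page_pa(nasid);
 *	if (rp_pa == 0)
 *		return xpNoRsvdPageAddr;
 *
 * Internally, the loop above keeps calling the arch op for as long as it
 * returns xpNeedMoreInfo, growing buf to the cacheline-aligned length the
 * op asked for and pulling that data over with xp_remote_memcpy() on each
 * pass.
 */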

/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
int
xpc_setup_rsvd_page(void)
{
	int ret;
	struct xpc_rsvd_page *rp;
	unsigned long rp_pa;
	unsigned long new_ts_jiffies;

	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return -ESRCH;
	}
	rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));

	if (rp->SAL_version < 3) {
		/* SAL_versions < 3 had a SAL_partid defined as a u8 */
		rp->SAL_partid &= 0xff;
	}
	BUG_ON(rp->SAL_partid != xp_partition_id);

	if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
		dev_err(xpc_part, "the reserved page's partid of %d is outside "
			"supported range (< 0 || >= %d)\n", rp->SAL_partid,
			xp_max_npartitions);
		return -EINVAL;
	}

	rp->version = XPC_RP_VERSION;
	rp->max_npartitions = xp_max_npartitions;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->SAL_nasids_size = 128;
	}
	xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
	xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
					      BITS_PER_BYTE);

	/* set up the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);

	ret = xpc_arch_ops.setup_rsvd_page(rp);
	if (ret != 0)
		return ret;

	/*
	 * Set timestamp of when reserved page was set up by XPC.
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	new_ts_jiffies = jiffies;
	if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
		new_ts_jiffies++;
	rp->ts_jiffies = new_ts_jiffies;

	xpc_rsvd_page = rp;
	return 0;
}
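
/*
 * Note on ts_jiffies (derived from the code in this file): 0 is reserved to
 * mean "this reserved page is not initialized", which is why the setup path
 * above bumps new_ts_jiffies when jiffies happens to be 0 or to match the
 * previous stamp, and why xpc_teardown_rsvd_page() below simply stores 0.
 * Remote partitions treat a zero stamp as xpRsvdPageNotSet (see
 * xpc_get_remote_rp()).
 */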

void
xpc_teardown_rsvd_page(void)
{
	/* a zero timestamp indicates our rsvd page is not initialized */
	xpc_rsvd_page->ts_jiffies = 0;
}

/*
 * Get a copy of a portion of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * is large enough to contain a copy of their reserved page header and
 * part_nasids mask.
 */
enum xp_retval
xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
		  struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
{
	int l;
	enum xp_retval ret;

	/* get the reserved page's physical address */

	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
	if (*remote_rp_pa == 0)
		return xpNoRsvdPageAddr;

	/* pull over the reserved page header and part_nasids mask */
	ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
			       XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
	if (ret != xpSuccess)
		return ret;

	if (discovered_nasids != NULL) {
		unsigned long *remote_part_nasids =
		    XPC_RP_PART_NASIDS(remote_rp);

		for (l = 0; l < xpc_nasid_mask_nlongs; l++)
			discovered_nasids[l] |= remote_part_nasids[l];
	}

	/* a zero timestamp indicates the reserved page has not been set up */
	if (remote_rp->ts_jiffies == 0)
		return xpRsvdPageNotSet;

	if (XPC_VERSION_MAJOR(remote_rp->version) !=
	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
		return xpBadVersion;
	}

	/* check that both remote and local partids are valid for each side */
	if (remote_rp->SAL_partid < 0 ||
	    remote_rp->SAL_partid >= xp_max_npartitions ||
	    remote_rp->max_npartitions <= xp_partition_id) {
		return xpInvalidPartid;
	}

	if (remote_rp->SAL_partid == xp_partition_id)
		return xpLocalPartid;

	return xpSuccess;
}
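
/*
 * Usage sketch (mirrors xpc_discovery() below; illustrative only): the
 * caller supplies a cacheline-aligned buffer big enough for the reserved
 * page header plus the part_nasids mask, and may pass NULL for
 * discovered_nasids when it has no mask of its own to OR the remote bits
 * into:
 *
 *	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
 *						  xpc_nasid_mask_nbytes,
 *						  GFP_KERNEL, &remote_rp_base);
 *	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
 */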

/*
 * See if the other side has responded to a partition deactivate request
 * from us. Though we requested the remote partition to deactivate with regard
 * to us, we really only need to wait for the other side to disengage from us.
 */
int
xpc_partition_disengaged(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	int disengaged;

	disengaged = !xpc_arch_ops.partition_engaged(partid);
	if (part->disengage_timeout) {
		if (!disengaged) {
			if (time_is_after_jiffies(part->disengage_timeout)) {
				/* timelimit hasn't been reached yet */
				return 0;
			}

			/*
			 * Other side hasn't responded to our deactivate
			 * request in a timely fashion, so assume it's dead.
			 */

			dev_info(xpc_part, "deactivate request to remote "
				 "partition %d timed out\n", partid);
			xpc_disengage_timedout = 1;
			xpc_arch_ops.assume_partition_disengaged(partid);
			disengaged = 1;
		}
		part->disengage_timeout = 0;

		/* cancel the timer function, provided it's not us */
		if (!in_interrupt())
			del_singleshot_timer_sync(&part->disengage_timer);

		DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
			part->act_state != XPC_P_AS_INACTIVE);
		if (part->act_state != XPC_P_AS_INACTIVE)
			xpc_wakeup_channel_mgr(part);

		xpc_arch_ops.cancel_partition_deactivation_request(part);
	}
	return disengaged;
}
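
/*
 * Polling sketch (hypothetical caller; the real callers live outside this
 * file): the return value means "already disengaged", so a shutdown path
 * can poll until the remote side lets go or the disengage_timeout handling
 * above declares it dead:
 *
 *	while (!xpc_partition_disengaged(part))
 *		schedule_timeout_interruptible(HZ / 10);
 */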

/*
 * Mark specified partition as active.
 */
enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
	unsigned long irq_flags;
	enum xp_retval ret;

	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	if (part->act_state == XPC_P_AS_ACTIVATING) {
		part->act_state = XPC_P_AS_ACTIVE;
		ret = xpSuccess;
	} else {
		DBUG_ON(part->reason == xpSuccess);
		ret = part->reason;
	}
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	return ret;
}

/*
 * Start the process of deactivating the specified partition.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
			 enum xp_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_INACTIVE) {
		XPC_SET_REASON(part, reason, line);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		if (reason == xpReactivating) {
			/* we interrupt ourselves to reactivate partition */
			xpc_arch_ops.request_partition_reactivation(part);
		}
		return;
	}
	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		if ((part->reason == xpUnloading && reason != xpUnloading) ||
		    reason == xpReactivating) {
			XPC_SET_REASON(part, reason, line);
		}
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		return;
	}

	part->act_state = XPC_P_AS_DEACTIVATING;
	XPC_SET_REASON(part, reason, line);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	/* ask remote partition to deactivate with regard to us */
	xpc_arch_ops.request_partition_deactivation(part);

	/* set a timelimit on the disengage phase of the deactivation request */
	part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
	part->disengage_timer.expires = part->disengage_timeout;
	add_timer(&part->disengage_timer);

	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
		XPC_PARTID(part), reason);

	xpc_partition_going_down(part, reason);
}
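
/*
 * Calling-convention sketch (hedged: the wrapper shown is assumed to be the
 * XPC_DEACTIVATE_PARTITION() macro from xpc.h, which is not part of this
 * file): callers pass __LINE__ as 'line' so XPC_SET_REASON() can record
 * where the deactivation was requested:
 *
 *	XPC_DEACTIVATE_PARTITION(part, xpReactivating);
 */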

/*
 * Mark specified partition as inactive.
 */
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
	unsigned long irq_flags;

	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
		XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	part->act_state = XPC_P_AS_INACTIVE;
	spin_unlock_irqrestore(&part->act_lock, irq_flags);
	part->remote_rp_pa = 0;
}

/*
 * SAL has provided a partition and machine mask.  The partition mask
 * contains a bit for each even nasid in our partition.  The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine.  Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	unsigned long remote_rp_pa;
	int region;
	int region_size;
	int max_regions;
	int nasid;
	unsigned long *discovered_nasids;
	enum xp_retval ret;

	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
						  xpc_nasid_mask_nbytes,
						  GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL)
		return;

	discovered_nasids = kcalloc(xpc_nasid_mask_nlongs, sizeof(long),
				    GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping. The access
	 * protection is in regards to memory, IOI and IPI.
	 */
	region_size = xp_region_size;

	if (is_uv_system())
		max_regions = 256;
	else {
		max_regions = 64;

		switch (region_size) {
		case 128:
			max_regions *= 2;
			fallthrough;
		case 64:
			max_regions *= 2;
			fallthrough;
		case 32:
			max_regions *= 2;
			region_size = 16;
		}
	}
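
	/*
	 * Worked example of the switch above (derived from the code, not a
	 * measured configuration): with xp_region_size == 128 the three
	 * fallthrough cases give max_regions = 64 * 2 * 2 * 2 = 512 and force
	 * region_size to 16, so the loop below still covers the same
	 * 64 * 128 * 2 == 512 * 16 * 2 nasid range, just in finer-grained
	 * regions.
	 */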

	for (region = 0; region < max_regions; region++) {

		if (xpc_exiting)
			break;

		dev_dbg(xpc_part, "searching region %d\n", region);

		for (nasid = (region * region_size * 2);
		     nasid < ((region + 1) * region_size * 2); nasid += 2) {

			if (xpc_exiting)
				break;

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);

			if (test_bit(nasid / 2, xpc_part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (test_bit(nasid / 2, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}

			/* pull over the rsvd page header & part_nasids mask */

			ret = xpc_get_remote_rp(nasid, discovered_nasids,
						remote_rp, &remote_rp_pa);
			if (ret != xpSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				if (ret == xpLocalPartid)
					break;

				continue;
			}

			xpc_arch_ops.request_partition_activation(remote_rp,
							 remote_rp_pa, nasid);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}

/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
	struct xpc_partition *part;
	unsigned long part_nasid_pa;

	part = &xpc_partitions[partid];
	if (part->remote_rp_pa == 0)
		return xpPartitionDown;

	memset(nasid_mask, 0, xpc_nasid_mask_nbytes);

	part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);

	return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
				xpc_nasid_mask_nbytes);
}
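
/*
 * Usage sketch (hypothetical caller, for illustration): the caller owns a
 * nasid_mask buffer of at least xpc_nasid_mask_nbytes bytes; the function
 * zeroes it and then copies the remote partition's part_nasids mask into it:
 *
 *	ret = xpc_initiate_partid_to_nasids(partid, nasid_mask);
 *	if (ret != xpSuccess)
 *		dev_dbg(xpc_part, "partid_to_nasids failed, reason=%d\n", ret);
 */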