Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
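
/*
 * Background: numa_enabled, numa_debug and cmdline above are all driven by
 * the early "numa=" kernel parameter, which is parsed later in this file
 * (outside this excerpt); e.g. "numa=debug" turns on the dbg() messages,
 * "numa=off" clears numa_enabled, and "numa=fake=..." feeds the fake node
 * code below.
 */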

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for_each_node(node)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify the node id iff we have started creating NUMA nodes.
         * We want to continue from where we left off the last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas, spaces and tabs
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
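
/*
 * Illustrative example: with "numa=fake=1G,4G" on the command line, cmdline
 * above points at "1G,4G". Memory below 1G then stays in node 0, fake node 1
 * is created when a region first extends past 1G, and fake node 2 past 4G.
 * memparse() accepts the usual K/M/G suffixes, and boundaries must be
 * increasing (mem < curr_boundary is rejected above).
 */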

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
        int dist = 0;

        int i, index;

        for (i = 0; i < distance_ref_points_depth; i++) {
                index = be32_to_cpu(distance_ref_points[i]);
                if (cpu1_assoc[index] == cpu2_assoc[index])
                        break;
                dist++;
        }

        return dist;
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}
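
/*
 * Illustrative (hypothetical values): a cpu node on a form 1 machine might
 * carry
 *
 *      ibm,associativity = <4 1 1 0 8>;
 *
 * i.e. a length cell (4) followed by one domain id per level, most
 * significant first. associativity_to_nid() below picks the entry at
 * min_common_depth as the node id.
 */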

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);
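
/*
 * Worked example (assuming form 1 affinity and distance_ref_points_depth
 * == 2): two nodes matching at the most significant level stay at
 * LOCAL_DISTANCE (10); nodes differing at the first level but matching at
 * the second get 10 * 2 == 20; nodes differing at both levels get
 * 10 * 2 * 2 == 40.
 */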

static void initialize_distance_lookup_table(int nid,
                const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}

/*
 * Returns nid in the range [0..nr_node_ids - 1], or -1 if no useful NUMA
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = NUMA_NO_NODE;

        if (!numa_enabled)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= nr_node_ids)
                nid = NUMA_NO_NODE;

        if (nid > 0 &&
                of_read_number(associativity, 1) >= distance_ref_points_depth) {
                /*
                 * Skip the length field and pass the start of the
                 * associativity array.
                 */
                initialize_distance_lookup_table(nid, associativity + 1);
        }

out:
        return nid;
}
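
/*
 * Example (continuing the hypothetical property above): with
 * min_common_depth == 4 and ibm,associativity = <4 1 1 0 8>, the length
 * cell (4) satisfies >= min_common_depth, so the nid is read from
 * associativity[4], i.e. node 8, subject to the 0xffff and nr_node_ids
 * checks.
 */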

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = NUMA_NO_NODE;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        int nid = NUMA_NO_NODE;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                device = of_get_next_parent(device);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}
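
/*
 * Illustrative device tree fragment (hypothetical values):
 *
 *      ibm,associativity-reference-points = <0x4 0x2>;
 *
 * On a form 1 machine the first cell (0x4) names the most significant NUMA
 * boundary, so min_common_depth becomes 4; on a form 0 machine only the
 * second cell (0x2) is used. The property length is returned in bytes,
 * hence the division by sizeof(int) above to get the number of cells.
 */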

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
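
/*
 * Example: with n == 2 and the cells <0x1 0x80000000>, read_n_cells()
 * returns 0x180000000 (6GB) and leaves *buf pointing past the two cells.
 */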

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
        struct device_node *memory;
        const __be32 *prop;
        u32 len;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!memory)
                return -1;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int)) {
                of_node_put(memory);
                return -1;
        }

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        of_node_put(memory);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
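
/*
 * Illustrative layout (hypothetical values):
 *
 *      ibm,associativity-lookup-arrays = <2 4  1 1 0 8  1 1 0 9>;
 *
 * i.e. N == 2 arrays of M == 4 cells each. An LMB's aa_index selects one
 * of the N arrays; note that unlike ibm,associativity these arrays carry
 * no leading length cell.
 */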

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
        struct assoc_arrays aa = { .arrays = NULL };
        int default_nid = NUMA_NO_NODE;
        int nid = default_nid;
        int rc, index;

        if ((min_common_depth < 0) || !numa_enabled)
                return default_nid;

        rc = of_get_assoc_arrays(&aa);
        if (rc)
                return default_nid;

        if (min_common_depth <= aa.array_sz &&
            !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
                index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
                nid = of_read_number(&aa.arrays[index], 1);

                if (nid == 0xffff || nid >= nr_node_ids)
                        nid = default_nid;

                if (nid > 0) {
                        index = lmb->aa_index * aa.array_sz;
                        initialize_distance_lookup_table(nid,
                                                        &aa.arrays[index]);
                }
        }

        return nid;
}
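
/*
 * Example (continuing the hypothetical layout above): with array_sz == 4,
 * min_common_depth == 4 and aa_index == 1, the node id is read from
 * aa.arrays[1 * 4 + 4 - 1] == aa.arrays[7], the last cell of the second
 * array (node 9); the "- 1" compensates for the missing length cell.
 */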

#ifdef CONFIG_PPC_SPLPAR
static int vphn_get_nid(long lcpu)
{
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        long rc, hwid;

        /*
         * On a shared lpar, the device tree will not have node
         * associativity. At this time the lppaca, or its __old_status
         * field, may not be updated, so the kernel cannot detect whether
         * it is on a shared lpar. Hence request an explicit associativity
         * irrespective of whether the lpar is shared or dedicated, and use
         * the device tree property as a fallback. cpu_to_phys_id is only
         * valid between smp_setup_cpu_maps() and smp_setup_pacas().
         */
        if (firmware_has_feature(FW_FEATURE_VPHN)) {
                if (cpu_to_phys_id)
                        hwid = cpu_to_phys_id[lcpu];
                else
                        hwid = get_hard_smp_processor_id(lcpu);

                rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
                if (rc == H_SUCCESS)
                        return associativity_to_nid(associativity);
        }

        return NUMA_NO_NODE;
}
#else
static int vphn_get_nid(long unused)
{
        return NUMA_NO_NODE;
}
#endif  /* CONFIG_PPC_SPLPAR */
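
/*
 * hcall_vphn() (implemented elsewhere) issues the H_HOME_NODE_ASSOCIATIVITY
 * hypervisor call, which reports the virtual processor's current home node
 * associativity; on H_SUCCESS the buffer uses the same big-endian layout as
 * the ibm,associativity device tree property, so it can be fed straight to
 * associativity_to_nid().
 */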

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        struct device_node *cpu;
        int fcpu = cpu_first_thread_sibling(lcpu);
        int nid = NUMA_NO_NODE;

        if (!cpu_present(lcpu)) {
                set_cpu_numa_node(lcpu, first_online_node);
                return first_online_node;
        }

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         * The cpu_to_node binding is the same for all threads in a core,
         * so if a valid mapping is already available for the first thread
         * in the core, use it.
         */
        nid = numa_cpu_lookup_table[fcpu];
        if (nid >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        nid = vphn_get_nid(lcpu);
        if (nid != NUMA_NO_NODE)
                goto out_present;

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);
        of_node_put(cpu);

out_present:
        if (nid < 0 || !node_possible(nid))
                nid = first_online_node;

        /*
         * Update for the first thread of the core. All threads of a core
         * have to be part of the same node. This not only avoids querying
         * for every other thread in the core, but also avoids a case where
         * a virtual node associativity change causes subsequent threads of
         * a core to be associated with a different nid. However, if the
         * first thread is already online, expect it to have a valid mapping.
         */
        if (fcpu != lcpu) {
                WARN_ON(cpu_online(fcpu));
                map_cpu_to_node(fcpu, nid);
        }

        map_cpu_to_node(lcpu, nid);
out:
        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
        int nid;

        nid = numa_setup_cpu(cpu);
        verify_cpu_node_mapping(cpu, nid);
        return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unmap_cpu_from_node(cpu);
#endif
        return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
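
/*
 * Example: booting with mem=4G on a machine with more RAM makes
 * memblock_end_of_DRAM() return 0x100000000. A region starting at 3G with
 * size 2G is then truncated to 1G, and a region starting at or above 4G is
 * dropped entirely (returned size 0).
 */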

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory, a corresponding
         * entry in the linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
                                        const __be32 **usm,
                                        void *data)
{
        unsigned int ranges, is_kexec_kdump = 0;
        unsigned long base, size, sz;
        int nid;

        /*
         * Skip this block if the reserved bit is set in flags (0x80)
         * or if the block is not assigned to this partition (0x8)
         */
        if ((lmb->flags & DRCONF_MEM_RESERVED)
            || !(lmb->flags & DRCONF_MEM_ASSIGNED))
                return 0;

        if (*usm)
                is_kexec_kdump = 1;

        base = lmb->base_addr;
        size = drmem_lmb_size();
        ranges = 1;

        if (is_kexec_kdump) {
                ranges = read_usm_ranges(usm);
                if (!ranges) /* there are no (base, size) pairs */
                        return 0;
        }

        do {
                if (is_kexec_kdump) {
                        base = read_n_cells(n_mem_addr_cells, usm);
                        size = read_n_cells(n_mem_size_cells, usm);
                }

                nid = of_drconf_to_nid_single(lmb);
                fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
                                          &nid);
                node_set_online(nid);
                sz = numa_enforce_memory_limit(base, size);
                if (sz)
                        memblock_set_node(base, sz, &memblock.memory, nid);
        } while (--ranges);

        return 0;
}
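
/*
 * Example (hypothetical): in a kdump kernel where only the first 128MB of a
 * 256MB LMB is usable, the LMB's linux,drconf-usable-memory entry would be
 * a count of 1 followed by one (base, size) pair describing those 128MB.
 * A count of 0 means the LMB contributes no usable memory and is skipped
 * above.
 */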

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0) {
                /*
                 * If we fail to parse min_common_depth from the device
                 * tree, mark NUMA disabled and boot with NUMA disabled.
                 */
                numa_enabled = false;
                return min_common_depth;
        }

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid = vphn_get_nid(i);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid == NUMA_NO_NODE) {
                        cpu = of_get_cpu_node(i, NULL);
                        BUG_ON(!cpu);
                        nid = of_node_to_nid_single(cpu);
                        of_node_put(cpu);
                }

                /* node_set_online() is UB if 'nid' is negative */
                if (likely(nid >= 0))
                        node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cells */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                size = numa_enforce_memory_limit(start, size);
                if (size)
                        memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
                of_node_put(memory);
        }

        return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
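^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * Fallback used when no usable NUMA information is found: place all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  * of memory in node 0 (numa=fake= may still split it into several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * synthetic nodes).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  */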
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) static void __init setup_nonnuma(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	unsigned long top_of_ram = memblock_end_of_DRAM();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	unsigned long total_ram = memblock_phys_mem_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	unsigned long start_pfn, end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	unsigned int nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	       top_of_ram, total_ram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	       (top_of_ram - total_ram) >> 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		fake_numa_create_new_node(end_pfn, &nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		memblock_set_node(PFN_PHYS(start_pfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 				  PFN_PHYS(end_pfn - start_pfn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 				  &memblock.memory, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		node_set_online(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
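^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * Print the CPUs of each online node as compressed ranges, e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  * "Node 0 CPUs: 0-7" (example output).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  */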
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) void __init dump_numa_cpu_topology(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	unsigned int node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	unsigned int cpu, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	if (!numa_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	for_each_online_node(node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		pr_info("Node %d CPUs:", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		 * If we used a CPU iterator here we would miss printing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		 * the holes in the cpumap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			if (cpumask_test_cpu(cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 					node_to_cpumask_map[node])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 					pr_cont(" %u", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 				++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 				if (count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 					pr_cont("-%u", cpu - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 				count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		if (count > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			pr_cont("-%u", nr_cpu_ids - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) /* Initialize NODE_DATA for a node, preferably from node-local memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	u64 spanned_pages = end_pfn - start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	u64 nd_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	void *nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	int tnid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
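^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	 * Prefer node-local memory; memblock may still fall back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	 * another node, which the early_pfn_to_nid() check below reports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	 */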
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (!nd_pa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		panic("Cannot allocate %zu bytes for node %d data\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		      nd_size, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	nd = __va(nd_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/* report and initialize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		nd_pa, nd_pa + nd_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	if (tnid != nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	node_data[nid] = nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	NODE_DATA(nid)->node_id = nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	NODE_DATA(nid)->node_start_pfn = start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
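^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * Grow node_possible_map with all nodes the platform could ever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * support, as advertised by the RTAS associativity-domains properties.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  */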
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) static void __init find_possible_nodes(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	struct device_node *rtas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	const __be32 *domains = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	int prop_length, max_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (!numa_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	rtas = of_find_node_by_path("/rtas");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (!rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	 * ibm,current-associativity-domains is a fairly recent property. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	 * it doesn't exist, then fall back on ibm,max-associativity-domains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	 * "Current" denotes what the platform can support, as opposed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	 * "max", which denotes what the hypervisor can support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	 * If the LPAR is migratable, new nodes might be activated after an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	 * LPM, so we should consider the max number in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		domains = of_get_property(rtas,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 					  "ibm,current-associativity-domains",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 					  &prop_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (!domains) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		domains = of_get_property(rtas, "ibm,max-associativity-domains",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 					&prop_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		if (!domains)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
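^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 * The entry at min_common_depth is the number of domains at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 * associativity level that maps to NUMA nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 */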
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	max_nodes = of_read_number(&domains[min_common_depth], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	for (i = 0; i < max_nodes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		if (!node_possible(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			node_set(i, node_possible_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
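^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 * Coregroups are only usable if the property also describes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * domains below the NUMA node level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 */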
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	prop_length /= sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (prop_length > min_common_depth + 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		coregroup_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	of_node_put(rtas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
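^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * Establish the memory/node layout and the boot-time cpu-to-node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  * mapping; this runs early, before SMP bringup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  */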
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) void __init mem_topology_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 * Linux/mm assumes node 0 to be online at boot. However, this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * not true on PowerPC, where node 0 is just like any other node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * it could be a cpuless, memoryless node. So force node 0 to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * offline for now. This prevents a cpuless, memoryless node 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 * from unnecessarily showing up as online. If a node has cpus or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 * memory that need to be online, it will be marked online anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	node_set_offline(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (parse_numa_properties())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		setup_nonnuma();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	 * Modify the set of possible NUMA nodes to reflect information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	 * available about the set of online nodes, and the set of nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	 * that we expect to make use of for this platform's affinity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	 * calculations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	nodes_and(node_possible_map, node_possible_map, node_online_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	find_possible_nodes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	setup_node_to_cpumask_map();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	reset_numa_cpu_lookup_table();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		 * Powerpc with CONFIG_NUMA always used to have a node 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		 * even if it was memoryless or cpuless. For all cpus that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		 * are possible but not present, cpu_to_node() would point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		 * to node 0. To remove a cpuless, memoryless dummy node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		 * powerpc needs to make sure cpu_to_node() for all possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		 * but not present cpus is set to a proper node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		numa_setup_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
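^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  * Second stage of NUMA init: allocate the per-node pg_data_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * structures, bring up sparsemem, and register the CPU hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * callbacks that keep the NUMA lookup tables current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  */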
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) void __init initmem_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	max_pfn = max_low_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	memblock_dump_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	for_each_online_node(nid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		unsigned long start_pfn, end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		setup_node_data(nid, start_pfn, end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	sparse_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 * even before we online them, so that we can use cpu_to_{node,mem}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 * early in boot, cf. smp_prepare_cpus().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 * _nocalls() + manual invocation is used because cpuhp is not yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * initialized for the boot CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int __init early_numa(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (strstr(p, "off"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		numa_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	if (strstr(p, "debug"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		numa_debug = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	p = strstr(p, "fake=");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		cmdline = p + strlen("fake=");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) early_param("numa", early_numa);
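^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * Illustrative command lines (the values are examples only):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  *   numa=off          disable NUMA handling entirely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  *   numa=debug        enable the dbg() messages above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  *   numa=fake=2G,4G   ask fake_numa_create_new_node() to split memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  *                     into synthetic nodes at the given boundaries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  * The options may be combined, e.g. numa=debug,fake=4G.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  */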
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  * Find the node associated with a hot-added memory section for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  * memory represented in the device tree by the property
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)  * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	struct drmem_lmb *lmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	unsigned long lmb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	int nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	lmb_size = drmem_lmb_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	for_each_drmem_lmb(lmb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		 * Skip this block if it is reserved or not assigned to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		 * this partition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		if ((lmb->flags & DRCONF_MEM_RESERVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		if ((scn_addr < lmb->base_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		    || (scn_addr >= (lmb->base_addr + lmb_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		nid = of_drconf_to_nid_single(lmb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	return nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)  * Find the node associated with a hot-added memory section for memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)  * represented in the device tree as a node (i.e. memory@XXXX) for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * each memblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int hot_add_node_scn_to_nid(unsigned long scn_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct device_node *memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	int nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	for_each_node_by_type(memory, "memory") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		unsigned long start, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		int ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		const __be32 *memcell_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		memcell_buf = of_get_property(memory, "reg", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		if (!memcell_buf || len <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		/* number of (addr, size) ranges encoded in the cells */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		while (ranges--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			size = read_n_cells(n_mem_size_cells, &memcell_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			if ((scn_addr < start) || (scn_addr >= (start + size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			nid = of_node_to_nid_single(memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (nid >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
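^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	 * Breaking out of the loop leaves a reference held on @memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 * when the loop instead runs to completion, @memory is NULL and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 * of_node_put() is a no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 */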
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	of_node_put(memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	return nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  * Find the node associated with a hot-added memory section.  Section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * sections are fully contained within a single MEMBLOCK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) int hot_add_scn_to_nid(unsigned long scn_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	struct device_node *memory = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (!numa_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		return first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		nid = hot_add_drconf_scn_to_nid(scn_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		of_node_put(memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		nid = hot_add_node_scn_to_nid(scn_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (nid < 0 || !node_possible(nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		nid = first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	return nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
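^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  * Upper bound for hot-pluggable memory: prefer the partition's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)  * ibm,lrdr-capacity (its leading 64-bit value is read as the maximum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * else fall back to the end of the dynamic-reconfiguration LMB array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  */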
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static u64 hot_add_drconf_memory_max(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	struct device_node *memory = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct device_node *dn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	const __be64 *lrdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	dn = of_find_node_by_path("/rtas");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	if (dn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		of_node_put(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (lrdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			return be64_to_cpup(lrdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	if (memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		of_node_put(memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		return drmem_lmb_memory_max();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * memory_hotplug_max - return max address of memory that may be added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  * This is currently only used on systems that support drconfig memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * hotplug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) u64 memory_hotplug_max(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) #endif /* CONFIG_MEMORY_HOTPLUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) /* Virtual Processor Home Node (VPHN) support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #ifdef CONFIG_PPC_SPLPAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static int topology_inited;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  * Retrieve the new associativity information for a virtual processor's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  * home node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static long vphn_get_associativity(unsigned long cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 					__be32 *associativity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 				VPHN_FLAG_VCPU, associativity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	case H_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		dbg("VPHN hcall succeeded. Reset polling...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	case H_FUNCTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	case H_HARDWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		pr_err_ratelimited("hcall_vphn() experienced a hardware fault preventing VPHN. Disabling polling...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	case H_PARAMETER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. Disabling polling...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				   rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
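^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  * Map @cpu to its home node as reported by VPHN, onlining that node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * when needed, and fall back to the first online node whenever the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  * reported node is unusable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  */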
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int find_and_online_cpu_nid(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	int new_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	/* Use associativity from first thread for all siblings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (vphn_get_associativity(cpu, associativity))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		return cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	new_nid = associativity_to_nid(associativity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	if (new_nid < 0 || !node_possible(new_nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		new_nid = first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (NODE_DATA(new_nid) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) #ifdef CONFIG_MEMORY_HOTPLUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		 * Need to ensure that NODE_DATA is initialized for a node from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		 * available memory (see memblock_alloc_try_nid). If unable to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		 * init the node, then default to nearest node that has memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 * installed. Skip onlining a node if the subsystems are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 * yet initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		if (!topology_inited || try_online_node(new_nid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			new_nid = first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		 * Default to using the nearest node that has memory installed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		 * Otherwise, it would be necessary to patch the kernel MM code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		 * to deal with more memoryless-node error conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		new_nid = first_online_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		cpu, new_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	return new_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
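^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * Derive a coregroup id for @cpu from its VPHN associativity, falling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * back to the core id when coregroups are not in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  */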
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int cpu_to_coregroup_id(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	if (cpu < 0 || cpu >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (!coregroup_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	if (!firmware_has_feature(FW_FEATURE_VPHN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (vphn_get_associativity(cpu, associativity))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
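^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * The first cell of the associativity array is the entry count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 * the next-to-last entry is used as the coregroup id when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	 * array extends below the node level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	 */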
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	index = of_read_number(associativity, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (index > min_common_depth + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		return of_read_number(&associativity[index - 1], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	return cpu_to_core_id(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
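^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  * Runs late in boot; once topology_inited is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * find_and_online_cpu_nid() is allowed to try onlining new nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  */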
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static int topology_update_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	topology_inited = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) device_initcall(topology_update_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) #endif /* CONFIG_PPC_SPLPAR */