Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* cpumap.c: used for optimizing CPU assignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <asm/cpudata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include "cpumap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
/* Levels of the CPU topology tree, root first.  Used both as node
 * levels and as indices into the per-level iteration-method tables.
 */
enum {
	CPUINFO_LVL_ROOT = 0,	/* Single root spanning all online CPUs */
	CPUINFO_LVL_NODE,	/* NUMA node */
	CPUINFO_LVL_CORE,	/* Physical core within a node */
	CPUINFO_LVL_PROC,	/* proc_id group (instruction pipeline) within a core */
	CPUINFO_LVL_MAX,	/* Level count; not itself a level */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/* Per-level rover update policies, OR-able flags. */
enum {
	ROVER_NO_OP              = 0,
	/* Increment rover every time level is visited */
	ROVER_INC_ON_VISIT       = 1 << 0,
	/* Increment parent's rover every time rover wraps around */
	ROVER_INC_PARENT_ON_LOOP = 1 << 1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
/* One node of the flattened topology tree (stored in cpuinfo_tree->nodes[]).
 * For CPUINFO_LVL_PROC (leaf) nodes, child_start/child_end/rover hold CPU
 * ids rather than nodes[] indices.
 */
struct cpuinfo_node {
	int id;          /* Topology id at this level (node/core_id/proc_id) */
	int level;       /* CPUINFO_LVL_* of this node */
	int num_cpus;    /* Number of CPUs in this hierarchy */
	int parent_index; /* nodes[] index of parent; -1 for the root */
	int child_start; /* Array index of the first child node */
	int child_end;   /* Array index of the last child node */
	int rover;       /* Child node iterator */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
/* Describes one level's contiguous slice of cpuinfo_tree->nodes[]. */
struct cpuinfo_level {
	int start_index; /* Index of first node of a level in a cpuinfo tree */
	int end_index;   /* Index of last node of a level in a cpuinfo tree */
	int num_nodes;   /* Number of nodes in a level in a cpuinfo tree */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/* Flattened topology tree.  All nodes live in the flexible nodes[] array
 * (allocated with struct_size()); nodes[0] is the root, and level[] gives
 * each level's slice of the array.
 */
struct cpuinfo_tree {
	int total_nodes;	/* Number of entries in nodes[] */

	/* Offsets into nodes[] for each level of the tree */
	struct cpuinfo_level level[CPUINFO_LVL_MAX];
	struct cpuinfo_node  nodes[];	/* Flexible array member */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
/* Current topology tree; rebuilt lazily by _map_to_cpu() when NULL (or,
 * with hotplug, when the online CPU count no longer matches the root).
 */
static struct cpuinfo_tree *cpuinfo_tree;

/* index -> CPU table produced by iterate_cpu(), one entry per online CPU */
static u16 cpu_distribution_map[NR_CPUS];
/* Protects cpuinfo_tree, cpu_distribution_map and the node rovers */
static DEFINE_SPINLOCK(cpu_map_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
/* Niagara optimized cpuinfo tree traversal.  Indexed by CPUINFO_LVL_*,
 * values are ROVER_* policy flags.
 */
static const int niagara_iterate_method[] = {
	[CPUINFO_LVL_ROOT] = ROVER_NO_OP,

	/* Strands (or virtual CPUs) within a core may not run concurrently
	 * on the Niagara, as instruction pipeline(s) are shared.  Distribute
	 * work to strands in different cores first for better concurrency.
	 * Go to next NUMA node when all cores are used.
	 */
	[CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,

	/* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
	 * a proc_id represents an instruction pipeline.  Distribute work to
	 * strands in different proc_id groups if the core has multiple
	 * instruction pipelines (e.g. the Niagara 2/2+ has two).
	 */
	[CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,

	/* Pick the next strand in the proc_id group. */
	[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
/* Generic cpuinfo tree traversal.  Distribute work round robin across NUMA
 * nodes.  Indexed by CPUINFO_LVL_*, values are ROVER_* policy flags.
 */
static const int generic_iterate_method[] = {
	[CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
	[CPUINFO_LVL_NODE] = ROVER_NO_OP,
	[CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
	[CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) static int cpuinfo_id(int cpu, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	switch (level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	case CPUINFO_LVL_ROOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	case CPUINFO_LVL_NODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		id = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	case CPUINFO_LVL_CORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		id = cpu_data(cpu).core_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	case CPUINFO_LVL_PROC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		id = cpu_data(cpu).proc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		id = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)  * Enumerate the CPU information in __cpu_data to determine the start index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)  * end index, and number of nodes for each level in the cpuinfo tree.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)  * total number of cpuinfo nodes required to build the tree is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	int prev_id[CPUINFO_LVL_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	int i, n, num_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 		struct cpuinfo_level *lv = &tree_level[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		prev_id[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		lv->start_index = lv->end_index = lv->num_nodes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	num_nodes = 1; /* Include the root node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	for (i = 0; i < num_possible_cpus(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		if (!cpu_online(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		n = cpuinfo_id(i, CPUINFO_LVL_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		if (n > prev_id[CPUINFO_LVL_NODE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 			tree_level[CPUINFO_LVL_NODE].num_nodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 			prev_id[CPUINFO_LVL_NODE] = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 			num_nodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		n = cpuinfo_id(i, CPUINFO_LVL_CORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 		if (n > prev_id[CPUINFO_LVL_CORE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 			tree_level[CPUINFO_LVL_CORE].num_nodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 			prev_id[CPUINFO_LVL_CORE] = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 			num_nodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		n = cpuinfo_id(i, CPUINFO_LVL_PROC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		if (n > prev_id[CPUINFO_LVL_PROC]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 			tree_level[CPUINFO_LVL_PROC].num_nodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 			prev_id[CPUINFO_LVL_PROC] = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 			num_nodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	n = tree_level[CPUINFO_LVL_NODE].num_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	tree_level[CPUINFO_LVL_NODE].start_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	tree_level[CPUINFO_LVL_NODE].end_index   = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	n++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	tree_level[CPUINFO_LVL_CORE].start_index = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	n += tree_level[CPUINFO_LVL_CORE].num_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	tree_level[CPUINFO_LVL_CORE].end_index   = n - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	tree_level[CPUINFO_LVL_PROC].start_index = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	n += tree_level[CPUINFO_LVL_PROC].num_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	tree_level[CPUINFO_LVL_PROC].end_index   = n - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	return num_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
/* Build a tree representation of the CPU hierarchy using the per CPU
 * information in __cpu_data.  Entries in __cpu_data[0..NR_CPUS] are
 * assumed to be sorted in ascending order based on node, core_id, and
 * proc_id (in order of significance).
 *
 * Returns a GFP_ATOMIC allocation owned by the caller (freed with
 * kfree()), or NULL on allocation failure or bad cpuinfo_id() result.
 */
static struct cpuinfo_tree *build_cpuinfo_tree(void)
{
	struct cpuinfo_tree *new_tree;
	struct cpuinfo_node *node;
	struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
	int num_cpus[CPUINFO_LVL_MAX];    /* CPUs seen so far in current node, per level */
	int level_rover[CPUINFO_LVL_MAX]; /* nodes[] index being filled, per level */
	int prev_id[CPUINFO_LVL_MAX];     /* Last topology id seen, per level */
	int n, id, cpu, prev_cpu, last_cpu, level;

	n = enumerate_cpuinfo_nodes(tmp_level);

	new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC);
	if (!new_tree)
		return NULL;

	new_tree->total_nodes = n;
	memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));

	prev_cpu = cpu = cpumask_first(cpu_online_mask);

	/* Initialize all levels in the tree with the first CPU */
	for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
		n = new_tree->level[level].start_index;

		level_rover[level] = n;
		node = &new_tree->nodes[n];

		id = cpuinfo_id(cpu, level);
		if (unlikely(id < 0)) {
			kfree(new_tree);
			return NULL;
		}
		node->id = id;
		node->level = level;
		node->num_cpus = 1;

		node->parent_index = (level > CPUINFO_LVL_ROOT)
		    ? new_tree->level[level - 1].start_index : -1;

		/* PROC (leaf) nodes hold CPU ids as children, not nodes[]
		 * indices.
		 */
		node->child_start = node->child_end = node->rover =
		    (level == CPUINFO_LVL_PROC)
		    ? cpu : new_tree->level[level + 1].start_index;

		prev_id[level] = node->id;
		num_cpus[level] = 1;
	}

	/* Find the highest-numbered online CPU; reaching it closes out the
	 * node currently being filled at every level.
	 */
	for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
		if (cpu_online(last_cpu))
			break;
	}

	while (++cpu <= last_cpu) {
		if (!cpu_online(cpu))
			continue;

		for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
		     level--) {
			id = cpuinfo_id(cpu, level);
			if (unlikely(id < 0)) {
				kfree(new_tree);
				return NULL;
			}

			/* An id change (or the final CPU) finishes the node
			 * being filled at this level.
			 */
			if ((id != prev_id[level]) || (cpu == last_cpu)) {
				prev_id[level] = id;
				node = &new_tree->nodes[level_rover[level]];
				node->num_cpus = num_cpus[level];
				num_cpus[level] = 1;

				/* The last CPU also belongs to the node being
				 * closed out.
				 */
				if (cpu == last_cpu)
					node->num_cpus++;

				/* Connect tree node to parent */
				if (level == CPUINFO_LVL_ROOT)
					node->parent_index = -1;
				else
					node->parent_index =
					    level_rover[level - 1];

				if (level == CPUINFO_LVL_PROC) {
					node->child_end =
					    (cpu == last_cpu) ? cpu : prev_cpu;
				} else {
					node->child_end =
					    level_rover[level + 1] - 1;
				}

				/* Initialize the next node in the same level */
				n = ++level_rover[level];
				if (n <= new_tree->level[level].end_index) {
					node = &new_tree->nodes[n];
					node->id = id;
					node->level = level;

					/* Connect node to child */
					node->child_start = node->child_end =
					node->rover =
					    (level == CPUINFO_LVL_PROC)
					    ? cpu : level_rover[level + 1];
				}
			} else
				num_cpus[level]++;
		}
		prev_cpu = cpu;
	}

	return new_tree;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static void increment_rover(struct cpuinfo_tree *t, int node_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)                             int root_index, const int *rover_inc_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	struct cpuinfo_node *node = &t->nodes[node_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	int top_level, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	top_level = t->nodes[root_index].level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	for (level = node->level; level >= top_level; level--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		node->rover++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		if (node->rover <= node->child_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		node->rover = node->child_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		/* If parent's rover does not need to be adjusted, stop here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 		if ((level == top_level) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 		    !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 		node = &t->nodes[node->parent_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	const int *rover_inc_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	int level, new_index, index = root_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	switch (sun4v_chip_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	case SUN4V_CHIP_NIAGARA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	case SUN4V_CHIP_NIAGARA2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	case SUN4V_CHIP_NIAGARA3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	case SUN4V_CHIP_NIAGARA4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	case SUN4V_CHIP_NIAGARA5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	case SUN4V_CHIP_SPARC_M6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	case SUN4V_CHIP_SPARC_M7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	case SUN4V_CHIP_SPARC_M8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	case SUN4V_CHIP_SPARC_SN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	case SUN4V_CHIP_SPARC64X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		rover_inc_table = niagara_iterate_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		rover_inc_table = generic_iterate_method;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	     level++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		new_index = t->nodes[index].rover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 			increment_rover(t, index, root_index, rover_inc_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 		index = new_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static void _cpu_map_rebuild(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	if (cpuinfo_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		kfree(cpuinfo_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		cpuinfo_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	cpuinfo_tree = build_cpuinfo_tree();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	if (!cpuinfo_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	/* Build CPU distribution map that spans all online CPUs.  No need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	 * to check if the CPU is online, as that is done when the cpuinfo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	 * tree is being built.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 		cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 
/* Fallback if the cpuinfo tree could not be built.  CPU mapping is linear
 * round robin: return the first online CPU numbered >= (index mod number
 * of online CPUs).
 *
 * NOTE(review): if cpu_rover lands on an offline CPU it is never
 * advanced, so the loop runs out and we hit the cpumask_first() fallback
 * below — confirm this best-effort behavior is intended.
 */
static int simple_map_to_cpu(unsigned int index)
{
	int i, end, cpu_rover;

	cpu_rover = 0;
	end = index % num_online_cpus();
	for (i = 0; i < num_possible_cpus(); i++) {
		if (cpu_online(cpu_rover)) {
			if (cpu_rover >= end)
				return cpu_rover;

			cpu_rover++;
		}
	}

	/* Impossible, since num_online_cpus() <= num_possible_cpus() */
	return cpumask_first(cpu_online_mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static int _map_to_cpu(unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	struct cpuinfo_node *root_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	if (unlikely(!cpuinfo_tree)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 		_cpu_map_rebuild();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 		if (!cpuinfo_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 			return simple_map_to_cpu(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	root_node = &cpuinfo_tree->nodes[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	if (unlikely(root_node->num_cpus != num_online_cpus())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 		_cpu_map_rebuild();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		if (!cpuinfo_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 			return simple_map_to_cpu(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	return cpu_distribution_map[index % root_node->num_cpus];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 
/* Map @index to an online CPU, distributing consecutive indices across
 * the machine's CPU topology.  Exported entry point; takes cpu_map_lock.
 */
int map_to_cpu(unsigned int index)
{
	int mapped_cpu;
	unsigned long flag;

	spin_lock_irqsave(&cpu_map_lock, flag);
	mapped_cpu = _map_to_cpu(index);

#ifdef CONFIG_HOTPLUG_CPU
	/* Retry if the chosen CPU went offline; _map_to_cpu() rebuilds the
	 * tree when the online CPU count no longer matches.
	 */
	while (unlikely(!cpu_online(mapped_cpu)))
		mapped_cpu = _map_to_cpu(index);
#endif
	spin_unlock_irqrestore(&cpu_map_lock, flag);
	return mapped_cpu;
}
EXPORT_SYMBOL(map_to_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) void cpu_map_rebuild(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	spin_lock_irqsave(&cpu_map_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	_cpu_map_rebuild();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	spin_unlock_irqrestore(&cpu_map_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }